##// END OF EJS Templates
localrepo: add separate methods for manipulating repository data...
Matt Mackall -
r3457:ff06fe07 default
parent child Browse files
Show More
@@ -1,1819 +1,1824 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
40 os.mkdir(self.join("data"))
41 else:
41 else:
42 raise repo.RepoError(_("repository %s not found") % path)
42 raise repo.RepoError(_("repository %s not found") % path)
43 elif create:
43 elif create:
44 raise repo.RepoError(_("repository %s already exists") % path)
44 raise repo.RepoError(_("repository %s already exists") % path)
45
45
46 self.root = os.path.abspath(path)
46 self.root = os.path.abspath(path)
47 self.origroot = path
47 self.origroot = path
48 self.ui = ui.ui(parentui=parentui)
48 self.ui = ui.ui(parentui=parentui)
49 self.opener = util.opener(self.path)
49 self.opener = util.opener(self.path)
50 self.sopener = util.opener(self.path)
50 self.wopener = util.opener(self.root)
51 self.wopener = util.opener(self.root)
51
52
52 try:
53 try:
53 self.ui.readconfig(self.join("hgrc"), self.root)
54 self.ui.readconfig(self.join("hgrc"), self.root)
54 except IOError:
55 except IOError:
55 pass
56 pass
56
57
57 v = self.ui.configrevlog()
58 v = self.ui.configrevlog()
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 fl = v.get('flags', None)
61 fl = v.get('flags', None)
61 flags = 0
62 flags = 0
62 if fl != None:
63 if fl != None:
63 for x in fl.split():
64 for x in fl.split():
64 flags |= revlog.flagstr(x)
65 flags |= revlog.flagstr(x)
65 elif self.revlogv1:
66 elif self.revlogv1:
66 flags = revlog.REVLOG_DEFAULT_FLAGS
67 flags = revlog.REVLOG_DEFAULT_FLAGS
67
68
68 v = self.revlogversion | flags
69 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.opener, v)
70 self.manifest = manifest.manifest(self.sopener, v)
70 self.changelog = changelog.changelog(self.opener, v)
71 self.changelog = changelog.changelog(self.sopener, v)
71
72
72 # the changelog might not have the inline index flag
73 # the changelog might not have the inline index flag
73 # on. If the format of the changelog is the same as found in
74 # on. If the format of the changelog is the same as found in
74 # .hgrc, apply any flags found in the .hgrc as well.
75 # .hgrc, apply any flags found in the .hgrc as well.
75 # Otherwise, just version from the changelog
76 # Otherwise, just version from the changelog
76 v = self.changelog.version
77 v = self.changelog.version
77 if v == self.revlogversion:
78 if v == self.revlogversion:
78 v |= flags
79 v |= flags
79 self.revlogversion = v
80 self.revlogversion = v
80
81
81 self.tagscache = None
82 self.tagscache = None
82 self.branchcache = None
83 self.branchcache = None
83 self.nodetagscache = None
84 self.nodetagscache = None
84 self.encodepats = None
85 self.encodepats = None
85 self.decodepats = None
86 self.decodepats = None
86 self.transhandle = None
87 self.transhandle = None
87
88
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
90
90 def url(self):
91 def url(self):
91 return 'file:' + self.root
92 return 'file:' + self.root
92
93
93 def hook(self, name, throw=False, **args):
94 def hook(self, name, throw=False, **args):
94 def callhook(hname, funcname):
95 def callhook(hname, funcname):
95 '''call python hook. hook is callable object, looked up as
96 '''call python hook. hook is callable object, looked up as
96 name in python module. if callable returns "true", hook
97 name in python module. if callable returns "true", hook
97 fails, else passes. if hook raises exception, treated as
98 fails, else passes. if hook raises exception, treated as
98 hook failure. exception propagates if throw is "true".
99 hook failure. exception propagates if throw is "true".
99
100
100 reason for "true" meaning "hook failed" is so that
101 reason for "true" meaning "hook failed" is so that
101 unmodified commands (e.g. mercurial.commands.update) can
102 unmodified commands (e.g. mercurial.commands.update) can
102 be run as hooks without wrappers to convert return values.'''
103 be run as hooks without wrappers to convert return values.'''
103
104
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 d = funcname.rfind('.')
106 d = funcname.rfind('.')
106 if d == -1:
107 if d == -1:
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 % (hname, funcname))
109 % (hname, funcname))
109 modname = funcname[:d]
110 modname = funcname[:d]
110 try:
111 try:
111 obj = __import__(modname)
112 obj = __import__(modname)
112 except ImportError:
113 except ImportError:
113 try:
114 try:
114 # extensions are loaded with hgext_ prefix
115 # extensions are loaded with hgext_ prefix
115 obj = __import__("hgext_%s" % modname)
116 obj = __import__("hgext_%s" % modname)
116 except ImportError:
117 except ImportError:
117 raise util.Abort(_('%s hook is invalid '
118 raise util.Abort(_('%s hook is invalid '
118 '(import of "%s" failed)') %
119 '(import of "%s" failed)') %
119 (hname, modname))
120 (hname, modname))
120 try:
121 try:
121 for p in funcname.split('.')[1:]:
122 for p in funcname.split('.')[1:]:
122 obj = getattr(obj, p)
123 obj = getattr(obj, p)
123 except AttributeError, err:
124 except AttributeError, err:
124 raise util.Abort(_('%s hook is invalid '
125 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not defined)') %
126 '("%s" is not defined)') %
126 (hname, funcname))
127 (hname, funcname))
127 if not callable(obj):
128 if not callable(obj):
128 raise util.Abort(_('%s hook is invalid '
129 raise util.Abort(_('%s hook is invalid '
129 '("%s" is not callable)') %
130 '("%s" is not callable)') %
130 (hname, funcname))
131 (hname, funcname))
131 try:
132 try:
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 except (KeyboardInterrupt, util.SignalInterrupt):
134 except (KeyboardInterrupt, util.SignalInterrupt):
134 raise
135 raise
135 except Exception, exc:
136 except Exception, exc:
136 if isinstance(exc, util.Abort):
137 if isinstance(exc, util.Abort):
137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 self.ui.warn(_('error: %s hook failed: %s\n') %
138 (hname, exc.args[0]))
139 (hname, exc.args[0]))
139 else:
140 else:
140 self.ui.warn(_('error: %s hook raised an exception: '
141 self.ui.warn(_('error: %s hook raised an exception: '
141 '%s\n') % (hname, exc))
142 '%s\n') % (hname, exc))
142 if throw:
143 if throw:
143 raise
144 raise
144 self.ui.print_exc()
145 self.ui.print_exc()
145 return True
146 return True
146 if r:
147 if r:
147 if throw:
148 if throw:
148 raise util.Abort(_('%s hook failed') % hname)
149 raise util.Abort(_('%s hook failed') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 return r
151 return r
151
152
152 def runhook(name, cmd):
153 def runhook(name, cmd):
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 r = util.system(cmd, environ=env, cwd=self.root)
156 r = util.system(cmd, environ=env, cwd=self.root)
156 if r:
157 if r:
157 desc, r = util.explain_exit(r)
158 desc, r = util.explain_exit(r)
158 if throw:
159 if throw:
159 raise util.Abort(_('%s hook %s') % (name, desc))
160 raise util.Abort(_('%s hook %s') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 return r
162 return r
162
163
163 r = False
164 r = False
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 if hname.split(".", 1)[0] == name and cmd]
166 if hname.split(".", 1)[0] == name and cmd]
166 hooks.sort()
167 hooks.sort()
167 for hname, cmd in hooks:
168 for hname, cmd in hooks:
168 if cmd.startswith('python:'):
169 if cmd.startswith('python:'):
169 r = callhook(hname, cmd[7:].strip()) or r
170 r = callhook(hname, cmd[7:].strip()) or r
170 else:
171 else:
171 r = runhook(hname, cmd) or r
172 r = runhook(hname, cmd) or r
172 return r
173 return r
173
174
174 tag_disallowed = ':\r\n'
175 tag_disallowed = ':\r\n'
175
176
176 def tag(self, name, node, message, local, user, date):
177 def tag(self, name, node, message, local, user, date):
177 '''tag a revision with a symbolic name.
178 '''tag a revision with a symbolic name.
178
179
179 if local is True, the tag is stored in a per-repository file.
180 if local is True, the tag is stored in a per-repository file.
180 otherwise, it is stored in the .hgtags file, and a new
181 otherwise, it is stored in the .hgtags file, and a new
181 changeset is committed with the change.
182 changeset is committed with the change.
182
183
183 keyword arguments:
184 keyword arguments:
184
185
185 local: whether to store tag in non-version-controlled file
186 local: whether to store tag in non-version-controlled file
186 (default False)
187 (default False)
187
188
188 message: commit message to use if committing
189 message: commit message to use if committing
189
190
190 user: name of user to use if committing
191 user: name of user to use if committing
191
192
192 date: date tuple to use if committing'''
193 date: date tuple to use if committing'''
193
194
194 for c in self.tag_disallowed:
195 for c in self.tag_disallowed:
195 if c in name:
196 if c in name:
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 raise util.Abort(_('%r cannot be used in a tag name') % c)
197
198
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199
200
200 if local:
201 if local:
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 self.hook('tag', node=hex(node), tag=name, local=local)
203 self.hook('tag', node=hex(node), tag=name, local=local)
203 return
204 return
204
205
205 for x in self.status()[:5]:
206 for x in self.status()[:5]:
206 if '.hgtags' in x:
207 if '.hgtags' in x:
207 raise util.Abort(_('working copy of .hgtags is changed '
208 raise util.Abort(_('working copy of .hgtags is changed '
208 '(please commit .hgtags manually)'))
209 '(please commit .hgtags manually)'))
209
210
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 if self.dirstate.state('.hgtags') == '?':
212 if self.dirstate.state('.hgtags') == '?':
212 self.add(['.hgtags'])
213 self.add(['.hgtags'])
213
214
214 self.commit(['.hgtags'], message, user, date)
215 self.commit(['.hgtags'], message, user, date)
215 self.hook('tag', node=hex(node), tag=name, local=local)
216 self.hook('tag', node=hex(node), tag=name, local=local)
216
217
217 def tags(self):
218 def tags(self):
218 '''return a mapping of tag to node'''
219 '''return a mapping of tag to node'''
219 if not self.tagscache:
220 if not self.tagscache:
220 self.tagscache = {}
221 self.tagscache = {}
221
222
222 def parsetag(line, context):
223 def parsetag(line, context):
223 if not line:
224 if not line:
224 return
225 return
225 s = l.split(" ", 1)
226 s = l.split(" ", 1)
226 if len(s) != 2:
227 if len(s) != 2:
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 return
229 return
229 node, key = s
230 node, key = s
230 key = key.strip()
231 key = key.strip()
231 try:
232 try:
232 bin_n = bin(node)
233 bin_n = bin(node)
233 except TypeError:
234 except TypeError:
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 (context, node))
236 (context, node))
236 return
237 return
237 if bin_n not in self.changelog.nodemap:
238 if bin_n not in self.changelog.nodemap:
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 (context, key))
240 (context, key))
240 return
241 return
241 self.tagscache[key] = bin_n
242 self.tagscache[key] = bin_n
242
243
243 # read the tags file from each head, ending with the tip,
244 # read the tags file from each head, ending with the tip,
244 # and add each tag found to the map, with "newer" ones
245 # and add each tag found to the map, with "newer" ones
245 # taking precedence
246 # taking precedence
246 heads = self.heads()
247 heads = self.heads()
247 heads.reverse()
248 heads.reverse()
248 seen = {}
249 seen = {}
249 for node in heads:
250 for node in heads:
250 f = self.filectx('.hgtags', node)
251 f = self.filectx('.hgtags', node)
251 if not f or f.filerev() in seen: continue
252 if not f or f.filerev() in seen: continue
252 seen[f.filerev()] = 1
253 seen[f.filerev()] = 1
253 count = 0
254 count = 0
254 for l in f.data().splitlines():
255 for l in f.data().splitlines():
255 count += 1
256 count += 1
256 parsetag(l, _("%s, line %d") % (str(f), count))
257 parsetag(l, _("%s, line %d") % (str(f), count))
257
258
258 try:
259 try:
259 f = self.opener("localtags")
260 f = self.opener("localtags")
260 count = 0
261 count = 0
261 for l in f:
262 for l in f:
262 count += 1
263 count += 1
263 parsetag(l, _("localtags, line %d") % count)
264 parsetag(l, _("localtags, line %d") % count)
264 except IOError:
265 except IOError:
265 pass
266 pass
266
267
267 self.tagscache['tip'] = self.changelog.tip()
268 self.tagscache['tip'] = self.changelog.tip()
268
269
269 return self.tagscache
270 return self.tagscache
270
271
271 def tagslist(self):
272 def tagslist(self):
272 '''return a list of tags ordered by revision'''
273 '''return a list of tags ordered by revision'''
273 l = []
274 l = []
274 for t, n in self.tags().items():
275 for t, n in self.tags().items():
275 try:
276 try:
276 r = self.changelog.rev(n)
277 r = self.changelog.rev(n)
277 except:
278 except:
278 r = -2 # sort to the beginning of the list if unknown
279 r = -2 # sort to the beginning of the list if unknown
279 l.append((r, t, n))
280 l.append((r, t, n))
280 l.sort()
281 l.sort()
281 return [(t, n) for r, t, n in l]
282 return [(t, n) for r, t, n in l]
282
283
283 def nodetags(self, node):
284 def nodetags(self, node):
284 '''return the tags associated with a node'''
285 '''return the tags associated with a node'''
285 if not self.nodetagscache:
286 if not self.nodetagscache:
286 self.nodetagscache = {}
287 self.nodetagscache = {}
287 for t, n in self.tags().items():
288 for t, n in self.tags().items():
288 self.nodetagscache.setdefault(n, []).append(t)
289 self.nodetagscache.setdefault(n, []).append(t)
289 return self.nodetagscache.get(node, [])
290 return self.nodetagscache.get(node, [])
290
291
291 def branchtags(self):
292 def branchtags(self):
292 if self.branchcache != None:
293 if self.branchcache != None:
293 return self.branchcache
294 return self.branchcache
294
295
295 self.branchcache = {} # avoid recursion in changectx
296 self.branchcache = {} # avoid recursion in changectx
296
297
297 try:
298 try:
298 f = self.opener("branches.cache")
299 f = self.opener("branches.cache")
299 last, lrev = f.readline().rstrip().split(" ", 1)
300 last, lrev = f.readline().rstrip().split(" ", 1)
300 last, lrev = bin(last), int(lrev)
301 last, lrev = bin(last), int(lrev)
301 if (lrev < self.changelog.count() and
302 if (lrev < self.changelog.count() and
302 self.changelog.node(lrev) == last): # sanity check
303 self.changelog.node(lrev) == last): # sanity check
303 for l in f:
304 for l in f:
304 node, label = l.rstrip().split(" ", 1)
305 node, label = l.rstrip().split(" ", 1)
305 self.branchcache[label] = bin(node)
306 self.branchcache[label] = bin(node)
306 else: # invalidate the cache
307 else: # invalidate the cache
307 last, lrev = nullid, -1
308 last, lrev = nullid, -1
308 f.close()
309 f.close()
309 except IOError:
310 except IOError:
310 last, lrev = nullid, -1
311 last, lrev = nullid, -1
311
312
312 tip = self.changelog.count() - 1
313 tip = self.changelog.count() - 1
313 if lrev != tip:
314 if lrev != tip:
314 for r in xrange(lrev + 1, tip + 1):
315 for r in xrange(lrev + 1, tip + 1):
315 c = self.changectx(r)
316 c = self.changectx(r)
316 b = c.branch()
317 b = c.branch()
317 if b:
318 if b:
318 self.branchcache[b] = c.node()
319 self.branchcache[b] = c.node()
319 self._writebranchcache()
320 self._writebranchcache()
320
321
321 return self.branchcache
322 return self.branchcache
322
323
323 def _writebranchcache(self):
324 def _writebranchcache(self):
324 try:
325 try:
325 f = self.opener("branches.cache", "w")
326 f = self.opener("branches.cache", "w")
326 t = self.changelog.tip()
327 t = self.changelog.tip()
327 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
328 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
328 for label, node in self.branchcache.iteritems():
329 for label, node in self.branchcache.iteritems():
329 f.write("%s %s\n" % (hex(node), label))
330 f.write("%s %s\n" % (hex(node), label))
330 except IOError:
331 except IOError:
331 pass
332 pass
332
333
333 def lookup(self, key):
334 def lookup(self, key):
334 if key == '.':
335 if key == '.':
335 key = self.dirstate.parents()[0]
336 key = self.dirstate.parents()[0]
336 if key == nullid:
337 if key == nullid:
337 raise repo.RepoError(_("no revision checked out"))
338 raise repo.RepoError(_("no revision checked out"))
338 n = self.changelog._match(key)
339 n = self.changelog._match(key)
339 if n:
340 if n:
340 return n
341 return n
341 if key in self.tags():
342 if key in self.tags():
342 return self.tags()[key]
343 return self.tags()[key]
343 if key in self.branchtags():
344 if key in self.branchtags():
344 return self.branchtags()[key]
345 return self.branchtags()[key]
345 n = self.changelog._partialmatch(key)
346 n = self.changelog._partialmatch(key)
346 if n:
347 if n:
347 return n
348 return n
348 raise repo.RepoError(_("unknown revision '%s'") % key)
349 raise repo.RepoError(_("unknown revision '%s'") % key)
349
350
350 def dev(self):
351 def dev(self):
351 return os.lstat(self.path).st_dev
352 return os.lstat(self.path).st_dev
352
353
353 def local(self):
354 def local(self):
354 return True
355 return True
355
356
356 def join(self, f):
357 def join(self, f):
357 return os.path.join(self.path, f)
358 return os.path.join(self.path, f)
358
359
360 def sjoin(self, f):
361 return os.path.join(self.path, f)
362
359 def wjoin(self, f):
363 def wjoin(self, f):
360 return os.path.join(self.root, f)
364 return os.path.join(self.root, f)
361
365
362 def file(self, f):
366 def file(self, f):
363 if f[0] == '/':
367 if f[0] == '/':
364 f = f[1:]
368 f = f[1:]
365 return filelog.filelog(self.opener, f, self.revlogversion)
369 return filelog.filelog(self.sopener, f, self.revlogversion)
366
370
367 def changectx(self, changeid=None):
371 def changectx(self, changeid=None):
368 return context.changectx(self, changeid)
372 return context.changectx(self, changeid)
369
373
370 def workingctx(self):
374 def workingctx(self):
371 return context.workingctx(self)
375 return context.workingctx(self)
372
376
373 def parents(self, changeid=None):
377 def parents(self, changeid=None):
374 '''
378 '''
375 get list of changectxs for parents of changeid or working directory
379 get list of changectxs for parents of changeid or working directory
376 '''
380 '''
377 if changeid is None:
381 if changeid is None:
378 pl = self.dirstate.parents()
382 pl = self.dirstate.parents()
379 else:
383 else:
380 n = self.changelog.lookup(changeid)
384 n = self.changelog.lookup(changeid)
381 pl = self.changelog.parents(n)
385 pl = self.changelog.parents(n)
382 if pl[1] == nullid:
386 if pl[1] == nullid:
383 return [self.changectx(pl[0])]
387 return [self.changectx(pl[0])]
384 return [self.changectx(pl[0]), self.changectx(pl[1])]
388 return [self.changectx(pl[0]), self.changectx(pl[1])]
385
389
386 def filectx(self, path, changeid=None, fileid=None):
390 def filectx(self, path, changeid=None, fileid=None):
387 """changeid can be a changeset revision, node, or tag.
391 """changeid can be a changeset revision, node, or tag.
388 fileid can be a file revision or node."""
392 fileid can be a file revision or node."""
389 return context.filectx(self, path, changeid, fileid)
393 return context.filectx(self, path, changeid, fileid)
390
394
391 def getcwd(self):
395 def getcwd(self):
392 return self.dirstate.getcwd()
396 return self.dirstate.getcwd()
393
397
394 def wfile(self, f, mode='r'):
398 def wfile(self, f, mode='r'):
395 return self.wopener(f, mode)
399 return self.wopener(f, mode)
396
400
397 def wread(self, filename):
401 def wread(self, filename):
398 if self.encodepats == None:
402 if self.encodepats == None:
399 l = []
403 l = []
400 for pat, cmd in self.ui.configitems("encode"):
404 for pat, cmd in self.ui.configitems("encode"):
401 mf = util.matcher(self.root, "", [pat], [], [])[1]
405 mf = util.matcher(self.root, "", [pat], [], [])[1]
402 l.append((mf, cmd))
406 l.append((mf, cmd))
403 self.encodepats = l
407 self.encodepats = l
404
408
405 data = self.wopener(filename, 'r').read()
409 data = self.wopener(filename, 'r').read()
406
410
407 for mf, cmd in self.encodepats:
411 for mf, cmd in self.encodepats:
408 if mf(filename):
412 if mf(filename):
409 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
413 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
410 data = util.filter(data, cmd)
414 data = util.filter(data, cmd)
411 break
415 break
412
416
413 return data
417 return data
414
418
415 def wwrite(self, filename, data, fd=None):
419 def wwrite(self, filename, data, fd=None):
416 if self.decodepats == None:
420 if self.decodepats == None:
417 l = []
421 l = []
418 for pat, cmd in self.ui.configitems("decode"):
422 for pat, cmd in self.ui.configitems("decode"):
419 mf = util.matcher(self.root, "", [pat], [], [])[1]
423 mf = util.matcher(self.root, "", [pat], [], [])[1]
420 l.append((mf, cmd))
424 l.append((mf, cmd))
421 self.decodepats = l
425 self.decodepats = l
422
426
423 for mf, cmd in self.decodepats:
427 for mf, cmd in self.decodepats:
424 if mf(filename):
428 if mf(filename):
425 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
429 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
426 data = util.filter(data, cmd)
430 data = util.filter(data, cmd)
427 break
431 break
428
432
429 if fd:
433 if fd:
430 return fd.write(data)
434 return fd.write(data)
431 return self.wopener(filename, 'w').write(data)
435 return self.wopener(filename, 'w').write(data)
432
436
433 def transaction(self):
437 def transaction(self):
434 tr = self.transhandle
438 tr = self.transhandle
435 if tr != None and tr.running():
439 if tr != None and tr.running():
436 return tr.nest()
440 return tr.nest()
437
441
438 # save dirstate for rollback
442 # save dirstate for rollback
439 try:
443 try:
440 ds = self.opener("dirstate").read()
444 ds = self.opener("dirstate").read()
441 except IOError:
445 except IOError:
442 ds = ""
446 ds = ""
443 self.opener("journal.dirstate", "w").write(ds)
447 self.opener("journal.dirstate", "w").write(ds)
444
448
445 tr = transaction.transaction(self.ui.warn, self.opener,
449 tr = transaction.transaction(self.ui.warn, self.sopener,
446 self.join("journal"),
450 self.sjoin("journal"),
447 aftertrans(self.path))
451 aftertrans(self.path))
448 self.transhandle = tr
452 self.transhandle = tr
449 return tr
453 return tr
450
454
451 def recover(self):
455 def recover(self):
452 l = self.lock()
456 l = self.lock()
453 if os.path.exists(self.join("journal")):
457 if os.path.exists(self.sjoin("journal")):
454 self.ui.status(_("rolling back interrupted transaction\n"))
458 self.ui.status(_("rolling back interrupted transaction\n"))
455 transaction.rollback(self.opener, self.join("journal"))
459 transaction.rollback(self.sopener, self.sjoin("journal"))
456 self.reload()
460 self.reload()
457 return True
461 return True
458 else:
462 else:
459 self.ui.warn(_("no interrupted transaction available\n"))
463 self.ui.warn(_("no interrupted transaction available\n"))
460 return False
464 return False
461
465
462 def rollback(self, wlock=None):
466 def rollback(self, wlock=None):
463 if not wlock:
467 if not wlock:
464 wlock = self.wlock()
468 wlock = self.wlock()
465 l = self.lock()
469 l = self.lock()
466 if os.path.exists(self.join("undo")):
470 if os.path.exists(self.sjoin("undo")):
467 self.ui.status(_("rolling back last transaction\n"))
471 self.ui.status(_("rolling back last transaction\n"))
468 transaction.rollback(self.opener, self.join("undo"))
472 transaction.rollback(self.sopener, self.sjoin("undo"))
469 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
473 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
470 self.reload()
474 self.reload()
471 self.wreload()
475 self.wreload()
472 else:
476 else:
473 self.ui.warn(_("no rollback information available\n"))
477 self.ui.warn(_("no rollback information available\n"))
474
478
475 def wreload(self):
479 def wreload(self):
476 self.dirstate.read()
480 self.dirstate.read()
477
481
478 def reload(self):
482 def reload(self):
479 self.changelog.load()
483 self.changelog.load()
480 self.manifest.load()
484 self.manifest.load()
481 self.tagscache = None
485 self.tagscache = None
482 self.nodetagscache = None
486 self.nodetagscache = None
483
487
484 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
488 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
485 desc=None):
489 desc=None):
486 try:
490 try:
487 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
491 l = lock.lock(lockname, 0, releasefn, desc=desc)
488 except lock.LockHeld, inst:
492 except lock.LockHeld, inst:
489 if not wait:
493 if not wait:
490 raise
494 raise
491 self.ui.warn(_("waiting for lock on %s held by %s\n") %
495 self.ui.warn(_("waiting for lock on %s held by %s\n") %
492 (desc, inst.args[0]))
496 (desc, inst.args[0]))
493 # default to 600 seconds timeout
497 # default to 600 seconds timeout
494 l = lock.lock(self.join(lockname),
498 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
495 int(self.ui.config("ui", "timeout") or 600),
496 releasefn, desc=desc)
499 releasefn, desc=desc)
497 if acquirefn:
500 if acquirefn:
498 acquirefn()
501 acquirefn()
499 return l
502 return l
500
503
501 def lock(self, wait=1):
504 def lock(self, wait=1):
502 return self.do_lock("lock", wait, acquirefn=self.reload,
505 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
503 desc=_('repository %s') % self.origroot)
506 desc=_('repository %s') % self.origroot)
504
507
505 def wlock(self, wait=1):
508 def wlock(self, wait=1):
506 return self.do_lock("wlock", wait, self.dirstate.write,
509 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
507 self.wreload,
510 self.wreload,
508 desc=_('working directory of %s') % self.origroot)
511 desc=_('working directory of %s') % self.origroot)
509
512
510 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
513 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
511 """
514 """
512 commit an individual file as part of a larger transaction
515 commit an individual file as part of a larger transaction
513 """
516 """
514
517
515 t = self.wread(fn)
518 t = self.wread(fn)
516 fl = self.file(fn)
519 fl = self.file(fn)
517 fp1 = manifest1.get(fn, nullid)
520 fp1 = manifest1.get(fn, nullid)
518 fp2 = manifest2.get(fn, nullid)
521 fp2 = manifest2.get(fn, nullid)
519
522
520 meta = {}
523 meta = {}
521 cp = self.dirstate.copied(fn)
524 cp = self.dirstate.copied(fn)
522 if cp:
525 if cp:
523 meta["copy"] = cp
526 meta["copy"] = cp
524 if not manifest2: # not a branch merge
527 if not manifest2: # not a branch merge
525 meta["copyrev"] = hex(manifest1.get(cp, nullid))
528 meta["copyrev"] = hex(manifest1.get(cp, nullid))
526 fp2 = nullid
529 fp2 = nullid
527 elif fp2 != nullid: # copied on remote side
530 elif fp2 != nullid: # copied on remote side
528 meta["copyrev"] = hex(manifest1.get(cp, nullid))
531 meta["copyrev"] = hex(manifest1.get(cp, nullid))
529 else: # copied on local side, reversed
532 else: # copied on local side, reversed
530 meta["copyrev"] = hex(manifest2.get(cp))
533 meta["copyrev"] = hex(manifest2.get(cp))
531 fp2 = nullid
534 fp2 = nullid
532 self.ui.debug(_(" %s: copy %s:%s\n") %
535 self.ui.debug(_(" %s: copy %s:%s\n") %
533 (fn, cp, meta["copyrev"]))
536 (fn, cp, meta["copyrev"]))
534 fp1 = nullid
537 fp1 = nullid
535 elif fp2 != nullid:
538 elif fp2 != nullid:
536 # is one parent an ancestor of the other?
539 # is one parent an ancestor of the other?
537 fpa = fl.ancestor(fp1, fp2)
540 fpa = fl.ancestor(fp1, fp2)
538 if fpa == fp1:
541 if fpa == fp1:
539 fp1, fp2 = fp2, nullid
542 fp1, fp2 = fp2, nullid
540 elif fpa == fp2:
543 elif fpa == fp2:
541 fp2 = nullid
544 fp2 = nullid
542
545
543 # is the file unmodified from the parent? report existing entry
546 # is the file unmodified from the parent? report existing entry
544 if fp2 == nullid and not fl.cmp(fp1, t):
547 if fp2 == nullid and not fl.cmp(fp1, t):
545 return fp1
548 return fp1
546
549
547 changelist.append(fn)
550 changelist.append(fn)
548 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
551 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
549
552
550 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
553 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
551 orig_parent = self.dirstate.parents()[0] or nullid
554 orig_parent = self.dirstate.parents()[0] or nullid
552 p1 = p1 or self.dirstate.parents()[0] or nullid
555 p1 = p1 or self.dirstate.parents()[0] or nullid
553 p2 = p2 or self.dirstate.parents()[1] or nullid
556 p2 = p2 or self.dirstate.parents()[1] or nullid
554 c1 = self.changelog.read(p1)
557 c1 = self.changelog.read(p1)
555 c2 = self.changelog.read(p2)
558 c2 = self.changelog.read(p2)
556 m1 = self.manifest.read(c1[0]).copy()
559 m1 = self.manifest.read(c1[0]).copy()
557 m2 = self.manifest.read(c2[0])
560 m2 = self.manifest.read(c2[0])
558 changed = []
561 changed = []
559 removed = []
562 removed = []
560
563
561 if orig_parent == p1:
564 if orig_parent == p1:
562 update_dirstate = 1
565 update_dirstate = 1
563 else:
566 else:
564 update_dirstate = 0
567 update_dirstate = 0
565
568
566 if not wlock:
569 if not wlock:
567 wlock = self.wlock()
570 wlock = self.wlock()
568 l = self.lock()
571 l = self.lock()
569 tr = self.transaction()
572 tr = self.transaction()
570 linkrev = self.changelog.count()
573 linkrev = self.changelog.count()
571 for f in files:
574 for f in files:
572 try:
575 try:
573 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
576 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
574 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
577 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
575 except IOError:
578 except IOError:
576 try:
579 try:
577 del m1[f]
580 del m1[f]
578 if update_dirstate:
581 if update_dirstate:
579 self.dirstate.forget([f])
582 self.dirstate.forget([f])
580 removed.append(f)
583 removed.append(f)
581 except:
584 except:
582 # deleted from p2?
585 # deleted from p2?
583 pass
586 pass
584
587
585 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
588 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
586 user = user or self.ui.username()
589 user = user or self.ui.username()
587 n = self.changelog.add(mnode, changed + removed, text,
590 n = self.changelog.add(mnode, changed + removed, text,
588 tr, p1, p2, user, date)
591 tr, p1, p2, user, date)
589 tr.close()
592 tr.close()
590 if update_dirstate:
593 if update_dirstate:
591 self.dirstate.setparents(n, nullid)
594 self.dirstate.setparents(n, nullid)
592
595
593 def commit(self, files=None, text="", user=None, date=None,
596 def commit(self, files=None, text="", user=None, date=None,
594 match=util.always, force=False, lock=None, wlock=None,
597 match=util.always, force=False, lock=None, wlock=None,
595 force_editor=False):
598 force_editor=False):
596 commit = []
599 commit = []
597 remove = []
600 remove = []
598 changed = []
601 changed = []
599
602
600 if files:
603 if files:
601 for f in files:
604 for f in files:
602 s = self.dirstate.state(f)
605 s = self.dirstate.state(f)
603 if s in 'nmai':
606 if s in 'nmai':
604 commit.append(f)
607 commit.append(f)
605 elif s == 'r':
608 elif s == 'r':
606 remove.append(f)
609 remove.append(f)
607 else:
610 else:
608 self.ui.warn(_("%s not tracked!\n") % f)
611 self.ui.warn(_("%s not tracked!\n") % f)
609 else:
612 else:
610 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
613 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
611 commit = modified + added
614 commit = modified + added
612 remove = removed
615 remove = removed
613
616
614 p1, p2 = self.dirstate.parents()
617 p1, p2 = self.dirstate.parents()
615 c1 = self.changelog.read(p1)
618 c1 = self.changelog.read(p1)
616 c2 = self.changelog.read(p2)
619 c2 = self.changelog.read(p2)
617 m1 = self.manifest.read(c1[0]).copy()
620 m1 = self.manifest.read(c1[0]).copy()
618 m2 = self.manifest.read(c2[0])
621 m2 = self.manifest.read(c2[0])
619
622
620 branchname = self.workingctx().branch()
623 branchname = self.workingctx().branch()
621 oldname = c1[5].get("branch", "")
624 oldname = c1[5].get("branch", "")
622
625
623 if not commit and not remove and not force and p2 == nullid and \
626 if not commit and not remove and not force and p2 == nullid and \
624 branchname == oldname:
627 branchname == oldname:
625 self.ui.status(_("nothing changed\n"))
628 self.ui.status(_("nothing changed\n"))
626 return None
629 return None
627
630
628 xp1 = hex(p1)
631 xp1 = hex(p1)
629 if p2 == nullid: xp2 = ''
632 if p2 == nullid: xp2 = ''
630 else: xp2 = hex(p2)
633 else: xp2 = hex(p2)
631
634
632 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
635 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
633
636
634 if not wlock:
637 if not wlock:
635 wlock = self.wlock()
638 wlock = self.wlock()
636 if not lock:
639 if not lock:
637 lock = self.lock()
640 lock = self.lock()
638 tr = self.transaction()
641 tr = self.transaction()
639
642
640 # check in files
643 # check in files
641 new = {}
644 new = {}
642 linkrev = self.changelog.count()
645 linkrev = self.changelog.count()
643 commit.sort()
646 commit.sort()
644 for f in commit:
647 for f in commit:
645 self.ui.note(f + "\n")
648 self.ui.note(f + "\n")
646 try:
649 try:
647 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
650 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
648 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
651 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
649 except IOError:
652 except IOError:
650 self.ui.warn(_("trouble committing %s!\n") % f)
653 self.ui.warn(_("trouble committing %s!\n") % f)
651 raise
654 raise
652
655
653 # update manifest
656 # update manifest
654 m1.update(new)
657 m1.update(new)
655 for f in remove:
658 for f in remove:
656 if f in m1:
659 if f in m1:
657 del m1[f]
660 del m1[f]
658 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
661 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
659
662
660 # add changeset
663 # add changeset
661 new = new.keys()
664 new = new.keys()
662 new.sort()
665 new.sort()
663
666
664 user = user or self.ui.username()
667 user = user or self.ui.username()
665 if not text or force_editor:
668 if not text or force_editor:
666 edittext = []
669 edittext = []
667 if text:
670 if text:
668 edittext.append(text)
671 edittext.append(text)
669 edittext.append("")
672 edittext.append("")
670 if p2 != nullid:
673 if p2 != nullid:
671 edittext.append("HG: branch merge")
674 edittext.append("HG: branch merge")
672 edittext.extend(["HG: changed %s" % f for f in changed])
675 edittext.extend(["HG: changed %s" % f for f in changed])
673 edittext.extend(["HG: removed %s" % f for f in remove])
676 edittext.extend(["HG: removed %s" % f for f in remove])
674 if not changed and not remove:
677 if not changed and not remove:
675 edittext.append("HG: no files changed")
678 edittext.append("HG: no files changed")
676 edittext.append("")
679 edittext.append("")
677 # run editor in the repository root
680 # run editor in the repository root
678 olddir = os.getcwd()
681 olddir = os.getcwd()
679 os.chdir(self.root)
682 os.chdir(self.root)
680 text = self.ui.edit("\n".join(edittext), user)
683 text = self.ui.edit("\n".join(edittext), user)
681 os.chdir(olddir)
684 os.chdir(olddir)
682
685
683 lines = [line.rstrip() for line in text.rstrip().splitlines()]
686 lines = [line.rstrip() for line in text.rstrip().splitlines()]
684 while lines and not lines[0]:
687 while lines and not lines[0]:
685 del lines[0]
688 del lines[0]
686 if not lines:
689 if not lines:
687 return None
690 return None
688 text = '\n'.join(lines)
691 text = '\n'.join(lines)
689 extra = {}
692 extra = {}
690 if branchname:
693 if branchname:
691 extra["branch"] = branchname
694 extra["branch"] = branchname
692 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
695 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
693 user, date, extra)
696 user, date, extra)
694 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
697 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
695 parent2=xp2)
698 parent2=xp2)
696 tr.close()
699 tr.close()
697
700
698 self.dirstate.setparents(n)
701 self.dirstate.setparents(n)
699 self.dirstate.update(new, "n")
702 self.dirstate.update(new, "n")
700 self.dirstate.forget(remove)
703 self.dirstate.forget(remove)
701
704
702 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
705 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
703 return n
706 return n
704
707
705 def walk(self, node=None, files=[], match=util.always, badmatch=None):
708 def walk(self, node=None, files=[], match=util.always, badmatch=None):
706 if node:
709 if node:
707 fdict = dict.fromkeys(files)
710 fdict = dict.fromkeys(files)
708 for fn in self.manifest.read(self.changelog.read(node)[0]):
711 for fn in self.manifest.read(self.changelog.read(node)[0]):
709 for ffn in fdict:
712 for ffn in fdict:
710 # match if the file is the exact name or a directory
713 # match if the file is the exact name or a directory
711 if ffn == fn or fn.startswith("%s/" % ffn):
714 if ffn == fn or fn.startswith("%s/" % ffn):
712 del fdict[ffn]
715 del fdict[ffn]
713 break
716 break
714 if match(fn):
717 if match(fn):
715 yield 'm', fn
718 yield 'm', fn
716 for fn in fdict:
719 for fn in fdict:
717 if badmatch and badmatch(fn):
720 if badmatch and badmatch(fn):
718 if match(fn):
721 if match(fn):
719 yield 'b', fn
722 yield 'b', fn
720 else:
723 else:
721 self.ui.warn(_('%s: No such file in rev %s\n') % (
724 self.ui.warn(_('%s: No such file in rev %s\n') % (
722 util.pathto(self.getcwd(), fn), short(node)))
725 util.pathto(self.getcwd(), fn), short(node)))
723 else:
726 else:
724 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
727 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
725 yield src, fn
728 yield src, fn
726
729
727 def status(self, node1=None, node2=None, files=[], match=util.always,
730 def status(self, node1=None, node2=None, files=[], match=util.always,
728 wlock=None, list_ignored=False, list_clean=False):
731 wlock=None, list_ignored=False, list_clean=False):
729 """return status of files between two nodes or node and working directory
732 """return status of files between two nodes or node and working directory
730
733
731 If node1 is None, use the first dirstate parent instead.
734 If node1 is None, use the first dirstate parent instead.
732 If node2 is None, compare node1 with working directory.
735 If node2 is None, compare node1 with working directory.
733 """
736 """
734
737
735 def fcmp(fn, mf):
738 def fcmp(fn, mf):
736 t1 = self.wread(fn)
739 t1 = self.wread(fn)
737 return self.file(fn).cmp(mf.get(fn, nullid), t1)
740 return self.file(fn).cmp(mf.get(fn, nullid), t1)
738
741
739 def mfmatches(node):
742 def mfmatches(node):
740 change = self.changelog.read(node)
743 change = self.changelog.read(node)
741 mf = self.manifest.read(change[0]).copy()
744 mf = self.manifest.read(change[0]).copy()
742 for fn in mf.keys():
745 for fn in mf.keys():
743 if not match(fn):
746 if not match(fn):
744 del mf[fn]
747 del mf[fn]
745 return mf
748 return mf
746
749
747 modified, added, removed, deleted, unknown = [], [], [], [], []
750 modified, added, removed, deleted, unknown = [], [], [], [], []
748 ignored, clean = [], []
751 ignored, clean = [], []
749
752
750 compareworking = False
753 compareworking = False
751 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
754 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
752 compareworking = True
755 compareworking = True
753
756
754 if not compareworking:
757 if not compareworking:
755 # read the manifest from node1 before the manifest from node2,
758 # read the manifest from node1 before the manifest from node2,
756 # so that we'll hit the manifest cache if we're going through
759 # so that we'll hit the manifest cache if we're going through
757 # all the revisions in parent->child order.
760 # all the revisions in parent->child order.
758 mf1 = mfmatches(node1)
761 mf1 = mfmatches(node1)
759
762
760 # are we comparing the working directory?
763 # are we comparing the working directory?
761 if not node2:
764 if not node2:
762 if not wlock:
765 if not wlock:
763 try:
766 try:
764 wlock = self.wlock(wait=0)
767 wlock = self.wlock(wait=0)
765 except lock.LockException:
768 except lock.LockException:
766 wlock = None
769 wlock = None
767 (lookup, modified, added, removed, deleted, unknown,
770 (lookup, modified, added, removed, deleted, unknown,
768 ignored, clean) = self.dirstate.status(files, match,
771 ignored, clean) = self.dirstate.status(files, match,
769 list_ignored, list_clean)
772 list_ignored, list_clean)
770
773
771 # are we comparing working dir against its parent?
774 # are we comparing working dir against its parent?
772 if compareworking:
775 if compareworking:
773 if lookup:
776 if lookup:
774 # do a full compare of any files that might have changed
777 # do a full compare of any files that might have changed
775 mf2 = mfmatches(self.dirstate.parents()[0])
778 mf2 = mfmatches(self.dirstate.parents()[0])
776 for f in lookup:
779 for f in lookup:
777 if fcmp(f, mf2):
780 if fcmp(f, mf2):
778 modified.append(f)
781 modified.append(f)
779 else:
782 else:
780 clean.append(f)
783 clean.append(f)
781 if wlock is not None:
784 if wlock is not None:
782 self.dirstate.update([f], "n")
785 self.dirstate.update([f], "n")
783 else:
786 else:
784 # we are comparing working dir against non-parent
787 # we are comparing working dir against non-parent
785 # generate a pseudo-manifest for the working dir
788 # generate a pseudo-manifest for the working dir
786 # XXX: create it in dirstate.py ?
789 # XXX: create it in dirstate.py ?
787 mf2 = mfmatches(self.dirstate.parents()[0])
790 mf2 = mfmatches(self.dirstate.parents()[0])
788 for f in lookup + modified + added:
791 for f in lookup + modified + added:
789 mf2[f] = ""
792 mf2[f] = ""
790 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
793 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
791 for f in removed:
794 for f in removed:
792 if f in mf2:
795 if f in mf2:
793 del mf2[f]
796 del mf2[f]
794 else:
797 else:
795 # we are comparing two revisions
798 # we are comparing two revisions
796 mf2 = mfmatches(node2)
799 mf2 = mfmatches(node2)
797
800
798 if not compareworking:
801 if not compareworking:
799 # flush lists from dirstate before comparing manifests
802 # flush lists from dirstate before comparing manifests
800 modified, added, clean = [], [], []
803 modified, added, clean = [], [], []
801
804
802 # make sure to sort the files so we talk to the disk in a
805 # make sure to sort the files so we talk to the disk in a
803 # reasonable order
806 # reasonable order
804 mf2keys = mf2.keys()
807 mf2keys = mf2.keys()
805 mf2keys.sort()
808 mf2keys.sort()
806 for fn in mf2keys:
809 for fn in mf2keys:
807 if mf1.has_key(fn):
810 if mf1.has_key(fn):
808 if mf1.flags(fn) != mf2.flags(fn) or \
811 if mf1.flags(fn) != mf2.flags(fn) or \
809 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
812 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
810 modified.append(fn)
813 modified.append(fn)
811 elif list_clean:
814 elif list_clean:
812 clean.append(fn)
815 clean.append(fn)
813 del mf1[fn]
816 del mf1[fn]
814 else:
817 else:
815 added.append(fn)
818 added.append(fn)
816
819
817 removed = mf1.keys()
820 removed = mf1.keys()
818
821
819 # sort and return results:
822 # sort and return results:
820 for l in modified, added, removed, deleted, unknown, ignored, clean:
823 for l in modified, added, removed, deleted, unknown, ignored, clean:
821 l.sort()
824 l.sort()
822 return (modified, added, removed, deleted, unknown, ignored, clean)
825 return (modified, added, removed, deleted, unknown, ignored, clean)
823
826
824 def add(self, list, wlock=None):
827 def add(self, list, wlock=None):
825 if not wlock:
828 if not wlock:
826 wlock = self.wlock()
829 wlock = self.wlock()
827 for f in list:
830 for f in list:
828 p = self.wjoin(f)
831 p = self.wjoin(f)
829 if not os.path.exists(p):
832 if not os.path.exists(p):
830 self.ui.warn(_("%s does not exist!\n") % f)
833 self.ui.warn(_("%s does not exist!\n") % f)
831 elif not os.path.isfile(p):
834 elif not os.path.isfile(p):
832 self.ui.warn(_("%s not added: only files supported currently\n")
835 self.ui.warn(_("%s not added: only files supported currently\n")
833 % f)
836 % f)
834 elif self.dirstate.state(f) in 'an':
837 elif self.dirstate.state(f) in 'an':
835 self.ui.warn(_("%s already tracked!\n") % f)
838 self.ui.warn(_("%s already tracked!\n") % f)
836 else:
839 else:
837 self.dirstate.update([f], "a")
840 self.dirstate.update([f], "a")
838
841
839 def forget(self, list, wlock=None):
842 def forget(self, list, wlock=None):
840 if not wlock:
843 if not wlock:
841 wlock = self.wlock()
844 wlock = self.wlock()
842 for f in list:
845 for f in list:
843 if self.dirstate.state(f) not in 'ai':
846 if self.dirstate.state(f) not in 'ai':
844 self.ui.warn(_("%s not added!\n") % f)
847 self.ui.warn(_("%s not added!\n") % f)
845 else:
848 else:
846 self.dirstate.forget([f])
849 self.dirstate.forget([f])
847
850
848 def remove(self, list, unlink=False, wlock=None):
851 def remove(self, list, unlink=False, wlock=None):
849 if unlink:
852 if unlink:
850 for f in list:
853 for f in list:
851 try:
854 try:
852 util.unlink(self.wjoin(f))
855 util.unlink(self.wjoin(f))
853 except OSError, inst:
856 except OSError, inst:
854 if inst.errno != errno.ENOENT:
857 if inst.errno != errno.ENOENT:
855 raise
858 raise
856 if not wlock:
859 if not wlock:
857 wlock = self.wlock()
860 wlock = self.wlock()
858 for f in list:
861 for f in list:
859 p = self.wjoin(f)
862 p = self.wjoin(f)
860 if os.path.exists(p):
863 if os.path.exists(p):
861 self.ui.warn(_("%s still exists!\n") % f)
864 self.ui.warn(_("%s still exists!\n") % f)
862 elif self.dirstate.state(f) == 'a':
865 elif self.dirstate.state(f) == 'a':
863 self.dirstate.forget([f])
866 self.dirstate.forget([f])
864 elif f not in self.dirstate:
867 elif f not in self.dirstate:
865 self.ui.warn(_("%s not tracked!\n") % f)
868 self.ui.warn(_("%s not tracked!\n") % f)
866 else:
869 else:
867 self.dirstate.update([f], "r")
870 self.dirstate.update([f], "r")
868
871
869 def undelete(self, list, wlock=None):
872 def undelete(self, list, wlock=None):
870 p = self.dirstate.parents()[0]
873 p = self.dirstate.parents()[0]
871 mn = self.changelog.read(p)[0]
874 mn = self.changelog.read(p)[0]
872 m = self.manifest.read(mn)
875 m = self.manifest.read(mn)
873 if not wlock:
876 if not wlock:
874 wlock = self.wlock()
877 wlock = self.wlock()
875 for f in list:
878 for f in list:
876 if self.dirstate.state(f) not in "r":
879 if self.dirstate.state(f) not in "r":
877 self.ui.warn("%s not removed!\n" % f)
880 self.ui.warn("%s not removed!\n" % f)
878 else:
881 else:
879 t = self.file(f).read(m[f])
882 t = self.file(f).read(m[f])
880 self.wwrite(f, t)
883 self.wwrite(f, t)
881 util.set_exec(self.wjoin(f), m.execf(f))
884 util.set_exec(self.wjoin(f), m.execf(f))
882 self.dirstate.update([f], "n")
885 self.dirstate.update([f], "n")
883
886
884 def copy(self, source, dest, wlock=None):
887 def copy(self, source, dest, wlock=None):
885 p = self.wjoin(dest)
888 p = self.wjoin(dest)
886 if not os.path.exists(p):
889 if not os.path.exists(p):
887 self.ui.warn(_("%s does not exist!\n") % dest)
890 self.ui.warn(_("%s does not exist!\n") % dest)
888 elif not os.path.isfile(p):
891 elif not os.path.isfile(p):
889 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
892 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
890 else:
893 else:
891 if not wlock:
894 if not wlock:
892 wlock = self.wlock()
895 wlock = self.wlock()
893 if self.dirstate.state(dest) == '?':
896 if self.dirstate.state(dest) == '?':
894 self.dirstate.update([dest], "a")
897 self.dirstate.update([dest], "a")
895 self.dirstate.copy(source, dest)
898 self.dirstate.copy(source, dest)
896
899
897 def heads(self, start=None):
900 def heads(self, start=None):
898 heads = self.changelog.heads(start)
901 heads = self.changelog.heads(start)
899 # sort the output in rev descending order
902 # sort the output in rev descending order
900 heads = [(-self.changelog.rev(h), h) for h in heads]
903 heads = [(-self.changelog.rev(h), h) for h in heads]
901 heads.sort()
904 heads.sort()
902 return [n for (r, n) in heads]
905 return [n for (r, n) in heads]
903
906
904 # branchlookup returns a dict giving a list of branches for
907 # branchlookup returns a dict giving a list of branches for
905 # each head. A branch is defined as the tag of a node or
908 # each head. A branch is defined as the tag of a node or
906 # the branch of the node's parents. If a node has multiple
909 # the branch of the node's parents. If a node has multiple
907 # branch tags, tags are eliminated if they are visible from other
910 # branch tags, tags are eliminated if they are visible from other
908 # branch tags.
911 # branch tags.
909 #
912 #
910 # So, for this graph: a->b->c->d->e
913 # So, for this graph: a->b->c->d->e
911 # \ /
914 # \ /
912 # aa -----/
915 # aa -----/
913 # a has tag 2.6.12
916 # a has tag 2.6.12
914 # d has tag 2.6.13
917 # d has tag 2.6.13
915 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
918 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
916 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
919 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
917 # from the list.
920 # from the list.
918 #
921 #
919 # It is possible that more than one head will have the same branch tag.
922 # It is possible that more than one head will have the same branch tag.
920 # callers need to check the result for multiple heads under the same
923 # callers need to check the result for multiple heads under the same
921 # branch tag if that is a problem for them (ie checkout of a specific
924 # branch tag if that is a problem for them (ie checkout of a specific
922 # branch).
925 # branch).
923 #
926 #
924 # passing in a specific branch will limit the depth of the search
927 # passing in a specific branch will limit the depth of the search
925 # through the parents. It won't limit the branches returned in the
928 # through the parents. It won't limit the branches returned in the
926 # result though.
929 # result though.
927 def branchlookup(self, heads=None, branch=None):
930 def branchlookup(self, heads=None, branch=None):
928 if not heads:
931 if not heads:
929 heads = self.heads()
932 heads = self.heads()
930 headt = [ h for h in heads ]
933 headt = [ h for h in heads ]
931 chlog = self.changelog
934 chlog = self.changelog
932 branches = {}
935 branches = {}
933 merges = []
936 merges = []
934 seenmerge = {}
937 seenmerge = {}
935
938
936 # traverse the tree once for each head, recording in the branches
939 # traverse the tree once for each head, recording in the branches
937 # dict which tags are visible from this head. The branches
940 # dict which tags are visible from this head. The branches
938 # dict also records which tags are visible from each tag
941 # dict also records which tags are visible from each tag
939 # while we traverse.
942 # while we traverse.
940 while headt or merges:
943 while headt or merges:
941 if merges:
944 if merges:
942 n, found = merges.pop()
945 n, found = merges.pop()
943 visit = [n]
946 visit = [n]
944 else:
947 else:
945 h = headt.pop()
948 h = headt.pop()
946 visit = [h]
949 visit = [h]
947 found = [h]
950 found = [h]
948 seen = {}
951 seen = {}
949 while visit:
952 while visit:
950 n = visit.pop()
953 n = visit.pop()
951 if n in seen:
954 if n in seen:
952 continue
955 continue
953 pp = chlog.parents(n)
956 pp = chlog.parents(n)
954 tags = self.nodetags(n)
957 tags = self.nodetags(n)
955 if tags:
958 if tags:
956 for x in tags:
959 for x in tags:
957 if x == 'tip':
960 if x == 'tip':
958 continue
961 continue
959 for f in found:
962 for f in found:
960 branches.setdefault(f, {})[n] = 1
963 branches.setdefault(f, {})[n] = 1
961 branches.setdefault(n, {})[n] = 1
964 branches.setdefault(n, {})[n] = 1
962 break
965 break
963 if n not in found:
966 if n not in found:
964 found.append(n)
967 found.append(n)
965 if branch in tags:
968 if branch in tags:
966 continue
969 continue
967 seen[n] = 1
970 seen[n] = 1
968 if pp[1] != nullid and n not in seenmerge:
971 if pp[1] != nullid and n not in seenmerge:
969 merges.append((pp[1], [x for x in found]))
972 merges.append((pp[1], [x for x in found]))
970 seenmerge[n] = 1
973 seenmerge[n] = 1
971 if pp[0] != nullid:
974 if pp[0] != nullid:
972 visit.append(pp[0])
975 visit.append(pp[0])
973 # traverse the branches dict, eliminating branch tags from each
976 # traverse the branches dict, eliminating branch tags from each
974 # head that are visible from another branch tag for that head.
977 # head that are visible from another branch tag for that head.
975 out = {}
978 out = {}
976 viscache = {}
979 viscache = {}
977 for h in heads:
980 for h in heads:
978 def visible(node):
981 def visible(node):
979 if node in viscache:
982 if node in viscache:
980 return viscache[node]
983 return viscache[node]
981 ret = {}
984 ret = {}
982 visit = [node]
985 visit = [node]
983 while visit:
986 while visit:
984 x = visit.pop()
987 x = visit.pop()
985 if x in viscache:
988 if x in viscache:
986 ret.update(viscache[x])
989 ret.update(viscache[x])
987 elif x not in ret:
990 elif x not in ret:
988 ret[x] = 1
991 ret[x] = 1
989 if x in branches:
992 if x in branches:
990 visit[len(visit):] = branches[x].keys()
993 visit[len(visit):] = branches[x].keys()
991 viscache[node] = ret
994 viscache[node] = ret
992 return ret
995 return ret
993 if h not in branches:
996 if h not in branches:
994 continue
997 continue
995 # O(n^2), but somewhat limited. This only searches the
998 # O(n^2), but somewhat limited. This only searches the
996 # tags visible from a specific head, not all the tags in the
999 # tags visible from a specific head, not all the tags in the
997 # whole repo.
1000 # whole repo.
998 for b in branches[h]:
1001 for b in branches[h]:
999 vis = False
1002 vis = False
1000 for bb in branches[h].keys():
1003 for bb in branches[h].keys():
1001 if b != bb:
1004 if b != bb:
1002 if b in visible(bb):
1005 if b in visible(bb):
1003 vis = True
1006 vis = True
1004 break
1007 break
1005 if not vis:
1008 if not vis:
1006 l = out.setdefault(h, [])
1009 l = out.setdefault(h, [])
1007 l[len(l):] = self.nodetags(b)
1010 l[len(l):] = self.nodetags(b)
1008 return out
1011 return out
1009
1012
1010 def branches(self, nodes):
1013 def branches(self, nodes):
1011 if not nodes:
1014 if not nodes:
1012 nodes = [self.changelog.tip()]
1015 nodes = [self.changelog.tip()]
1013 b = []
1016 b = []
1014 for n in nodes:
1017 for n in nodes:
1015 t = n
1018 t = n
1016 while 1:
1019 while 1:
1017 p = self.changelog.parents(n)
1020 p = self.changelog.parents(n)
1018 if p[1] != nullid or p[0] == nullid:
1021 if p[1] != nullid or p[0] == nullid:
1019 b.append((t, n, p[0], p[1]))
1022 b.append((t, n, p[0], p[1]))
1020 break
1023 break
1021 n = p[0]
1024 n = p[0]
1022 return b
1025 return b
1023
1026
1024 def between(self, pairs):
1027 def between(self, pairs):
1025 r = []
1028 r = []
1026
1029
1027 for top, bottom in pairs:
1030 for top, bottom in pairs:
1028 n, l, i = top, [], 0
1031 n, l, i = top, [], 0
1029 f = 1
1032 f = 1
1030
1033
1031 while n != bottom:
1034 while n != bottom:
1032 p = self.changelog.parents(n)[0]
1035 p = self.changelog.parents(n)[0]
1033 if i == f:
1036 if i == f:
1034 l.append(n)
1037 l.append(n)
1035 f = f * 2
1038 f = f * 2
1036 n = p
1039 n = p
1037 i += 1
1040 i += 1
1038
1041
1039 r.append(l)
1042 r.append(l)
1040
1043
1041 return r
1044 return r
1042
1045
1043 def findincoming(self, remote, base=None, heads=None, force=False):
1046 def findincoming(self, remote, base=None, heads=None, force=False):
1044 """Return list of roots of the subsets of missing nodes from remote
1047 """Return list of roots of the subsets of missing nodes from remote
1045
1048
1046 If base dict is specified, assume that these nodes and their parents
1049 If base dict is specified, assume that these nodes and their parents
1047 exist on the remote side and that no child of a node of base exists
1050 exist on the remote side and that no child of a node of base exists
1048 in both remote and self.
1051 in both remote and self.
1049 Furthermore base will be updated to include the nodes that exists
1052 Furthermore base will be updated to include the nodes that exists
1050 in self and remote but no children exists in self and remote.
1053 in self and remote but no children exists in self and remote.
1051 If a list of heads is specified, return only nodes which are heads
1054 If a list of heads is specified, return only nodes which are heads
1052 or ancestors of these heads.
1055 or ancestors of these heads.
1053
1056
1054 All the ancestors of base are in self and in remote.
1057 All the ancestors of base are in self and in remote.
1055 All the descendants of the list returned are missing in self.
1058 All the descendants of the list returned are missing in self.
1056 (and so we know that the rest of the nodes are missing in remote, see
1059 (and so we know that the rest of the nodes are missing in remote, see
1057 outgoing)
1060 outgoing)
1058 """
1061 """
1059 m = self.changelog.nodemap
1062 m = self.changelog.nodemap
1060 search = []
1063 search = []
1061 fetch = {}
1064 fetch = {}
1062 seen = {}
1065 seen = {}
1063 seenbranch = {}
1066 seenbranch = {}
1064 if base == None:
1067 if base == None:
1065 base = {}
1068 base = {}
1066
1069
1067 if not heads:
1070 if not heads:
1068 heads = remote.heads()
1071 heads = remote.heads()
1069
1072
1070 if self.changelog.tip() == nullid:
1073 if self.changelog.tip() == nullid:
1071 base[nullid] = 1
1074 base[nullid] = 1
1072 if heads != [nullid]:
1075 if heads != [nullid]:
1073 return [nullid]
1076 return [nullid]
1074 return []
1077 return []
1075
1078
1076 # assume we're closer to the tip than the root
1079 # assume we're closer to the tip than the root
1077 # and start by examining the heads
1080 # and start by examining the heads
1078 self.ui.status(_("searching for changes\n"))
1081 self.ui.status(_("searching for changes\n"))
1079
1082
1080 unknown = []
1083 unknown = []
1081 for h in heads:
1084 for h in heads:
1082 if h not in m:
1085 if h not in m:
1083 unknown.append(h)
1086 unknown.append(h)
1084 else:
1087 else:
1085 base[h] = 1
1088 base[h] = 1
1086
1089
1087 if not unknown:
1090 if not unknown:
1088 return []
1091 return []
1089
1092
1090 req = dict.fromkeys(unknown)
1093 req = dict.fromkeys(unknown)
1091 reqcnt = 0
1094 reqcnt = 0
1092
1095
1093 # search through remote branches
1096 # search through remote branches
1094 # a 'branch' here is a linear segment of history, with four parts:
1097 # a 'branch' here is a linear segment of history, with four parts:
1095 # head, root, first parent, second parent
1098 # head, root, first parent, second parent
1096 # (a branch always has two parents (or none) by definition)
1099 # (a branch always has two parents (or none) by definition)
1097 unknown = remote.branches(unknown)
1100 unknown = remote.branches(unknown)
1098 while unknown:
1101 while unknown:
1099 r = []
1102 r = []
1100 while unknown:
1103 while unknown:
1101 n = unknown.pop(0)
1104 n = unknown.pop(0)
1102 if n[0] in seen:
1105 if n[0] in seen:
1103 continue
1106 continue
1104
1107
1105 self.ui.debug(_("examining %s:%s\n")
1108 self.ui.debug(_("examining %s:%s\n")
1106 % (short(n[0]), short(n[1])))
1109 % (short(n[0]), short(n[1])))
1107 if n[0] == nullid: # found the end of the branch
1110 if n[0] == nullid: # found the end of the branch
1108 pass
1111 pass
1109 elif n in seenbranch:
1112 elif n in seenbranch:
1110 self.ui.debug(_("branch already found\n"))
1113 self.ui.debug(_("branch already found\n"))
1111 continue
1114 continue
1112 elif n[1] and n[1] in m: # do we know the base?
1115 elif n[1] and n[1] in m: # do we know the base?
1113 self.ui.debug(_("found incomplete branch %s:%s\n")
1116 self.ui.debug(_("found incomplete branch %s:%s\n")
1114 % (short(n[0]), short(n[1])))
1117 % (short(n[0]), short(n[1])))
1115 search.append(n) # schedule branch range for scanning
1118 search.append(n) # schedule branch range for scanning
1116 seenbranch[n] = 1
1119 seenbranch[n] = 1
1117 else:
1120 else:
1118 if n[1] not in seen and n[1] not in fetch:
1121 if n[1] not in seen and n[1] not in fetch:
1119 if n[2] in m and n[3] in m:
1122 if n[2] in m and n[3] in m:
1120 self.ui.debug(_("found new changeset %s\n") %
1123 self.ui.debug(_("found new changeset %s\n") %
1121 short(n[1]))
1124 short(n[1]))
1122 fetch[n[1]] = 1 # earliest unknown
1125 fetch[n[1]] = 1 # earliest unknown
1123 for p in n[2:4]:
1126 for p in n[2:4]:
1124 if p in m:
1127 if p in m:
1125 base[p] = 1 # latest known
1128 base[p] = 1 # latest known
1126
1129
1127 for p in n[2:4]:
1130 for p in n[2:4]:
1128 if p not in req and p not in m:
1131 if p not in req and p not in m:
1129 r.append(p)
1132 r.append(p)
1130 req[p] = 1
1133 req[p] = 1
1131 seen[n[0]] = 1
1134 seen[n[0]] = 1
1132
1135
1133 if r:
1136 if r:
1134 reqcnt += 1
1137 reqcnt += 1
1135 self.ui.debug(_("request %d: %s\n") %
1138 self.ui.debug(_("request %d: %s\n") %
1136 (reqcnt, " ".join(map(short, r))))
1139 (reqcnt, " ".join(map(short, r))))
1137 for p in range(0, len(r), 10):
1140 for p in range(0, len(r), 10):
1138 for b in remote.branches(r[p:p+10]):
1141 for b in remote.branches(r[p:p+10]):
1139 self.ui.debug(_("received %s:%s\n") %
1142 self.ui.debug(_("received %s:%s\n") %
1140 (short(b[0]), short(b[1])))
1143 (short(b[0]), short(b[1])))
1141 unknown.append(b)
1144 unknown.append(b)
1142
1145
1143 # do binary search on the branches we found
1146 # do binary search on the branches we found
1144 while search:
1147 while search:
1145 n = search.pop(0)
1148 n = search.pop(0)
1146 reqcnt += 1
1149 reqcnt += 1
1147 l = remote.between([(n[0], n[1])])[0]
1150 l = remote.between([(n[0], n[1])])[0]
1148 l.append(n[1])
1151 l.append(n[1])
1149 p = n[0]
1152 p = n[0]
1150 f = 1
1153 f = 1
1151 for i in l:
1154 for i in l:
1152 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1155 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1153 if i in m:
1156 if i in m:
1154 if f <= 2:
1157 if f <= 2:
1155 self.ui.debug(_("found new branch changeset %s\n") %
1158 self.ui.debug(_("found new branch changeset %s\n") %
1156 short(p))
1159 short(p))
1157 fetch[p] = 1
1160 fetch[p] = 1
1158 base[i] = 1
1161 base[i] = 1
1159 else:
1162 else:
1160 self.ui.debug(_("narrowed branch search to %s:%s\n")
1163 self.ui.debug(_("narrowed branch search to %s:%s\n")
1161 % (short(p), short(i)))
1164 % (short(p), short(i)))
1162 search.append((p, i))
1165 search.append((p, i))
1163 break
1166 break
1164 p, f = i, f * 2
1167 p, f = i, f * 2
1165
1168
1166 # sanity check our fetch list
1169 # sanity check our fetch list
1167 for f in fetch.keys():
1170 for f in fetch.keys():
1168 if f in m:
1171 if f in m:
1169 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1172 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1170
1173
1171 if base.keys() == [nullid]:
1174 if base.keys() == [nullid]:
1172 if force:
1175 if force:
1173 self.ui.warn(_("warning: repository is unrelated\n"))
1176 self.ui.warn(_("warning: repository is unrelated\n"))
1174 else:
1177 else:
1175 raise util.Abort(_("repository is unrelated"))
1178 raise util.Abort(_("repository is unrelated"))
1176
1179
1177 self.ui.debug(_("found new changesets starting at ") +
1180 self.ui.debug(_("found new changesets starting at ") +
1178 " ".join([short(f) for f in fetch]) + "\n")
1181 " ".join([short(f) for f in fetch]) + "\n")
1179
1182
1180 self.ui.debug(_("%d total queries\n") % reqcnt)
1183 self.ui.debug(_("%d total queries\n") % reqcnt)
1181
1184
1182 return fetch.keys()
1185 return fetch.keys()
1183
1186
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # PEP 8: compare to the None singleton with 'is', not '=='
    if base is None:
        base = {}
    # findincoming fills 'base' with nodes known to be common with remote
    self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    # start with every node we have locally ...
    remain = dict.fromkeys(self.changelog.nodemap)

    # ... and prune everything remote has from the tree
    del remain[nullid]
    # materialize the keys: dict views (Python 3) have no pop(), and we
    # mutate the worklist while walking it
    remove = list(base.keys())
    while remove:
        # pop from the end: cheaper than pop(0), and the computed
        # ancestor closure does not depend on traversal order
        n = remove.pop()
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        # return a real list (a py2 .keys() result), not a py3 view
        return subset, list(updated_heads.keys())
    else:
        return subset
1231
1234
def pull(self, remote, heads=None, force=False, lock=None):
    # Fetch missing changesets from 'remote' and add them locally.
    # If the caller did not hand us a lock, take our own and release
    # it when done; a caller-supplied lock is left untouched.
    locked_here = not lock
    if locked_here:
        lock = self.lock()

    try:
        fetch = self.findincoming(remote, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        elif 'changegroupsubset' not in remote.capabilities:
            # a partial pull needs server-side support
            raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        if locked_here:
            lock.release()
1257
1260
def push(self, remote, force=False, revs=None):
    # Two strategies exist for pushing to a remote repository:
    #
    # addchangegroup assumes the local user can lock the remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes the local user cannot lock the remote repo
    # (new ssh servers, http servers).
    use_unbundle = remote.capable('unbundle')
    if use_unbundle:
        return self.push_unbundle(remote, force, revs)
    return self.push_addchangegroup(remote, force, revs)
1270
1273
def prepush(self, remote, force, revs):
    # Work out what must be pushed to 'remote'.  Returns a pair
    # (changegroup, remote_heads) when there is something to send,
    # or (None, 1) when the push is empty or must be refused.
    base = {}
    remote_heads = remote.heads()
    incoming = self.findincoming(remote, base, remote_heads, force=force)
    if incoming and not force:
        # remote has changesets we lack; refuse unless forced
        self.ui.warn(_("abort: unsynced remote changes!\n"))
        self.ui.status(_("(did you forget to sync?"
                         " use push -f to force)\n"))
        return None, 1

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is None:
        bases, heads = update, self.changelog.heads()
    else:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    if not force:
        # FIXME we don't properly detect creation of new heads
        # in the push -r case, assume the user knows what he's doing
        if (not revs and len(remote_heads) < len(heads)
            and remote_heads != [nullid]):
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1305
1308
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote repo and handing it a changegroup
    directly (local filesystem, old ssh servers).

    Returns remote.addchangegroup's result, or prepush's error
    status when there is nothing to push.
    """
    lock = remote.lock()
    try:
        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]
    finally:
        # release the remote lock explicitly instead of relying on
        # garbage collection of the lock object
        lock.release()
1314
1317
def push_unbundle(self, remote, force, revs):
    # The local repo finds the heads on the server and works out which
    # revs it must push.  Once the revs are transferred, the server
    # aborts if it meanwhile acquired different heads (someone else won
    # the commit/push race).
    ret = self.prepush(remote, force, revs)
    if ret[0] is None:
        # nothing to push (or push refused): propagate the status
        return ret[1]
    cg, remote_heads = ret
    if force:
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1327
1330
1328 def changegroupsubset(self, bases, heads, source):
1331 def changegroupsubset(self, bases, heads, source):
1329 """This function generates a changegroup consisting of all the nodes
1332 """This function generates a changegroup consisting of all the nodes
1330 that are descendents of any of the bases, and ancestors of any of
1333 that are descendents of any of the bases, and ancestors of any of
1331 the heads.
1334 the heads.
1332
1335
1333 It is fairly complex as determining which filenodes and which
1336 It is fairly complex as determining which filenodes and which
1334 manifest nodes need to be included for the changeset to be complete
1337 manifest nodes need to be included for the changeset to be complete
1335 is non-trivial.
1338 is non-trivial.
1336
1339
1337 Another wrinkle is doing the reverse, figuring out which changeset in
1340 Another wrinkle is doing the reverse, figuring out which changeset in
1338 the changegroup a particular filenode or manifestnode belongs to."""
1341 the changegroup a particular filenode or manifestnode belongs to."""
1339
1342
1340 self.hook('preoutgoing', throw=True, source=source)
1343 self.hook('preoutgoing', throw=True, source=source)
1341
1344
1342 # Set up some initial variables
1345 # Set up some initial variables
1343 # Make it easy to refer to self.changelog
1346 # Make it easy to refer to self.changelog
1344 cl = self.changelog
1347 cl = self.changelog
1345 # msng is short for missing - compute the list of changesets in this
1348 # msng is short for missing - compute the list of changesets in this
1346 # changegroup.
1349 # changegroup.
1347 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1350 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1348 # Some bases may turn out to be superfluous, and some heads may be
1351 # Some bases may turn out to be superfluous, and some heads may be
1349 # too. nodesbetween will return the minimal set of bases and heads
1352 # too. nodesbetween will return the minimal set of bases and heads
1350 # necessary to re-create the changegroup.
1353 # necessary to re-create the changegroup.
1351
1354
1352 # Known heads are the list of heads that it is assumed the recipient
1355 # Known heads are the list of heads that it is assumed the recipient
1353 # of this changegroup will know about.
1356 # of this changegroup will know about.
1354 knownheads = {}
1357 knownheads = {}
1355 # We assume that all parents of bases are known heads.
1358 # We assume that all parents of bases are known heads.
1356 for n in bases:
1359 for n in bases:
1357 for p in cl.parents(n):
1360 for p in cl.parents(n):
1358 if p != nullid:
1361 if p != nullid:
1359 knownheads[p] = 1
1362 knownheads[p] = 1
1360 knownheads = knownheads.keys()
1363 knownheads = knownheads.keys()
1361 if knownheads:
1364 if knownheads:
1362 # Now that we know what heads are known, we can compute which
1365 # Now that we know what heads are known, we can compute which
1363 # changesets are known. The recipient must know about all
1366 # changesets are known. The recipient must know about all
1364 # changesets required to reach the known heads from the null
1367 # changesets required to reach the known heads from the null
1365 # changeset.
1368 # changeset.
1366 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1369 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1367 junk = None
1370 junk = None
1368 # Transform the list into an ersatz set.
1371 # Transform the list into an ersatz set.
1369 has_cl_set = dict.fromkeys(has_cl_set)
1372 has_cl_set = dict.fromkeys(has_cl_set)
1370 else:
1373 else:
1371 # If there were no known heads, the recipient cannot be assumed to
1374 # If there were no known heads, the recipient cannot be assumed to
1372 # know about any changesets.
1375 # know about any changesets.
1373 has_cl_set = {}
1376 has_cl_set = {}
1374
1377
1375 # Make it easy to refer to self.manifest
1378 # Make it easy to refer to self.manifest
1376 mnfst = self.manifest
1379 mnfst = self.manifest
1377 # We don't know which manifests are missing yet
1380 # We don't know which manifests are missing yet
1378 msng_mnfst_set = {}
1381 msng_mnfst_set = {}
1379 # Nor do we know which filenodes are missing.
1382 # Nor do we know which filenodes are missing.
1380 msng_filenode_set = {}
1383 msng_filenode_set = {}
1381
1384
1382 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1385 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1383 junk = None
1386 junk = None
1384
1387
1385 # A changeset always belongs to itself, so the changenode lookup
1388 # A changeset always belongs to itself, so the changenode lookup
1386 # function for a changenode is identity.
1389 # function for a changenode is identity.
1387 def identity(x):
1390 def identity(x):
1388 return x
1391 return x
1389
1392
1390 # A function generating function. Sets up an environment for the
1393 # A function generating function. Sets up an environment for the
1391 # inner function.
1394 # inner function.
1392 def cmp_by_rev_func(revlog):
1395 def cmp_by_rev_func(revlog):
1393 # Compare two nodes by their revision number in the environment's
1396 # Compare two nodes by their revision number in the environment's
1394 # revision history. Since the revision number both represents the
1397 # revision history. Since the revision number both represents the
1395 # most efficient order to read the nodes in, and represents a
1398 # most efficient order to read the nodes in, and represents a
1396 # topological sorting of the nodes, this function is often useful.
1399 # topological sorting of the nodes, this function is often useful.
1397 def cmp_by_rev(a, b):
1400 def cmp_by_rev(a, b):
1398 return cmp(revlog.rev(a), revlog.rev(b))
1401 return cmp(revlog.rev(a), revlog.rev(b))
1399 return cmp_by_rev
1402 return cmp_by_rev
1400
1403
1401 # If we determine that a particular file or manifest node must be a
1404 # If we determine that a particular file or manifest node must be a
1402 # node that the recipient of the changegroup will already have, we can
1405 # node that the recipient of the changegroup will already have, we can
1403 # also assume the recipient will have all the parents. This function
1406 # also assume the recipient will have all the parents. This function
1404 # prunes them from the set of missing nodes.
1407 # prunes them from the set of missing nodes.
1405 def prune_parents(revlog, hasset, msngset):
1408 def prune_parents(revlog, hasset, msngset):
1406 haslst = hasset.keys()
1409 haslst = hasset.keys()
1407 haslst.sort(cmp_by_rev_func(revlog))
1410 haslst.sort(cmp_by_rev_func(revlog))
1408 for node in haslst:
1411 for node in haslst:
1409 parentlst = [p for p in revlog.parents(node) if p != nullid]
1412 parentlst = [p for p in revlog.parents(node) if p != nullid]
1410 while parentlst:
1413 while parentlst:
1411 n = parentlst.pop()
1414 n = parentlst.pop()
1412 if n not in hasset:
1415 if n not in hasset:
1413 hasset[n] = 1
1416 hasset[n] = 1
1414 p = [p for p in revlog.parents(n) if p != nullid]
1417 p = [p for p in revlog.parents(n) if p != nullid]
1415 parentlst.extend(p)
1418 parentlst.extend(p)
1416 for n in hasset:
1419 for n in hasset:
1417 msngset.pop(n, None)
1420 msngset.pop(n, None)
1418
1421
1419 # This is a function generating function used to set up an environment
1422 # This is a function generating function used to set up an environment
1420 # for the inner function to execute in.
1423 # for the inner function to execute in.
1421 def manifest_and_file_collector(changedfileset):
1424 def manifest_and_file_collector(changedfileset):
1422 # This is an information gathering function that gathers
1425 # This is an information gathering function that gathers
1423 # information from each changeset node that goes out as part of
1426 # information from each changeset node that goes out as part of
1424 # the changegroup. The information gathered is a list of which
1427 # the changegroup. The information gathered is a list of which
1425 # manifest nodes are potentially required (the recipient may
1428 # manifest nodes are potentially required (the recipient may
1426 # already have them) and total list of all files which were
1429 # already have them) and total list of all files which were
1427 # changed in any changeset in the changegroup.
1430 # changed in any changeset in the changegroup.
1428 #
1431 #
1429 # We also remember the first changenode we saw any manifest
1432 # We also remember the first changenode we saw any manifest
1430 # referenced by so we can later determine which changenode 'owns'
1433 # referenced by so we can later determine which changenode 'owns'
1431 # the manifest.
1434 # the manifest.
1432 def collect_manifests_and_files(clnode):
1435 def collect_manifests_and_files(clnode):
1433 c = cl.read(clnode)
1436 c = cl.read(clnode)
1434 for f in c[3]:
1437 for f in c[3]:
1435 # This is to make sure we only have one instance of each
1438 # This is to make sure we only have one instance of each
1436 # filename string for each filename.
1439 # filename string for each filename.
1437 changedfileset.setdefault(f, f)
1440 changedfileset.setdefault(f, f)
1438 msng_mnfst_set.setdefault(c[0], clnode)
1441 msng_mnfst_set.setdefault(c[0], clnode)
1439 return collect_manifests_and_files
1442 return collect_manifests_and_files
1440
1443
1441 # Figure out which manifest nodes (of the ones we think might be part
1444 # Figure out which manifest nodes (of the ones we think might be part
1442 # of the changegroup) the recipient must know about and remove them
1445 # of the changegroup) the recipient must know about and remove them
1443 # from the changegroup.
1446 # from the changegroup.
1444 def prune_manifests():
1447 def prune_manifests():
1445 has_mnfst_set = {}
1448 has_mnfst_set = {}
1446 for n in msng_mnfst_set:
1449 for n in msng_mnfst_set:
1447 # If a 'missing' manifest thinks it belongs to a changenode
1450 # If a 'missing' manifest thinks it belongs to a changenode
1448 # the recipient is assumed to have, obviously the recipient
1451 # the recipient is assumed to have, obviously the recipient
1449 # must have that manifest.
1452 # must have that manifest.
1450 linknode = cl.node(mnfst.linkrev(n))
1453 linknode = cl.node(mnfst.linkrev(n))
1451 if linknode in has_cl_set:
1454 if linknode in has_cl_set:
1452 has_mnfst_set[n] = 1
1455 has_mnfst_set[n] = 1
1453 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1456 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1454
1457
1455 # Use the information collected in collect_manifests_and_files to say
1458 # Use the information collected in collect_manifests_and_files to say
1456 # which changenode any manifestnode belongs to.
1459 # which changenode any manifestnode belongs to.
1457 def lookup_manifest_link(mnfstnode):
1460 def lookup_manifest_link(mnfstnode):
1458 return msng_mnfst_set[mnfstnode]
1461 return msng_mnfst_set[mnfstnode]
1459
1462
1460 # A function generating function that sets up the initial environment
1463 # A function generating function that sets up the initial environment
1461 # the inner function.
1464 # the inner function.
1462 def filenode_collector(changedfiles):
1465 def filenode_collector(changedfiles):
1463 next_rev = [0]
1466 next_rev = [0]
1464 # This gathers information from each manifestnode included in the
1467 # This gathers information from each manifestnode included in the
1465 # changegroup about which filenodes the manifest node references
1468 # changegroup about which filenodes the manifest node references
1466 # so we can include those in the changegroup too.
1469 # so we can include those in the changegroup too.
1467 #
1470 #
1468 # It also remembers which changenode each filenode belongs to. It
1471 # It also remembers which changenode each filenode belongs to. It
1469 # does this by assuming the a filenode belongs to the changenode
1472 # does this by assuming the a filenode belongs to the changenode
1470 # the first manifest that references it belongs to.
1473 # the first manifest that references it belongs to.
1471 def collect_msng_filenodes(mnfstnode):
1474 def collect_msng_filenodes(mnfstnode):
1472 r = mnfst.rev(mnfstnode)
1475 r = mnfst.rev(mnfstnode)
1473 if r == next_rev[0]:
1476 if r == next_rev[0]:
1474 # If the last rev we looked at was the one just previous,
1477 # If the last rev we looked at was the one just previous,
1475 # we only need to see a diff.
1478 # we only need to see a diff.
1476 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1479 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1477 # For each line in the delta
1480 # For each line in the delta
1478 for dline in delta.splitlines():
1481 for dline in delta.splitlines():
1479 # get the filename and filenode for that line
1482 # get the filename and filenode for that line
1480 f, fnode = dline.split('\0')
1483 f, fnode = dline.split('\0')
1481 fnode = bin(fnode[:40])
1484 fnode = bin(fnode[:40])
1482 f = changedfiles.get(f, None)
1485 f = changedfiles.get(f, None)
1483 # And if the file is in the list of files we care
1486 # And if the file is in the list of files we care
1484 # about.
1487 # about.
1485 if f is not None:
1488 if f is not None:
1486 # Get the changenode this manifest belongs to
1489 # Get the changenode this manifest belongs to
1487 clnode = msng_mnfst_set[mnfstnode]
1490 clnode = msng_mnfst_set[mnfstnode]
1488 # Create the set of filenodes for the file if
1491 # Create the set of filenodes for the file if
1489 # there isn't one already.
1492 # there isn't one already.
1490 ndset = msng_filenode_set.setdefault(f, {})
1493 ndset = msng_filenode_set.setdefault(f, {})
1491 # And set the filenode's changelog node to the
1494 # And set the filenode's changelog node to the
1492 # manifest's if it hasn't been set already.
1495 # manifest's if it hasn't been set already.
1493 ndset.setdefault(fnode, clnode)
1496 ndset.setdefault(fnode, clnode)
1494 else:
1497 else:
1495 # Otherwise we need a full manifest.
1498 # Otherwise we need a full manifest.
1496 m = mnfst.read(mnfstnode)
1499 m = mnfst.read(mnfstnode)
1497 # For every file in we care about.
1500 # For every file in we care about.
1498 for f in changedfiles:
1501 for f in changedfiles:
1499 fnode = m.get(f, None)
1502 fnode = m.get(f, None)
1500 # If it's in the manifest
1503 # If it's in the manifest
1501 if fnode is not None:
1504 if fnode is not None:
1502 # See comments above.
1505 # See comments above.
1503 clnode = msng_mnfst_set[mnfstnode]
1506 clnode = msng_mnfst_set[mnfstnode]
1504 ndset = msng_filenode_set.setdefault(f, {})
1507 ndset = msng_filenode_set.setdefault(f, {})
1505 ndset.setdefault(fnode, clnode)
1508 ndset.setdefault(fnode, clnode)
1506 # Remember the revision we hope to see next.
1509 # Remember the revision we hope to see next.
1507 next_rev[0] = r + 1
1510 next_rev[0] = r + 1
1508 return collect_msng_filenodes
1511 return collect_msng_filenodes
1509
1512
1510 # We have a list of filenodes we think we need for a file, lets remove
1513 # We have a list of filenodes we think we need for a file, lets remove
1511 # all those we now the recipient must have.
1514 # all those we now the recipient must have.
1512 def prune_filenodes(f, filerevlog):
1515 def prune_filenodes(f, filerevlog):
1513 msngset = msng_filenode_set[f]
1516 msngset = msng_filenode_set[f]
1514 hasset = {}
1517 hasset = {}
1515 # If a 'missing' filenode thinks it belongs to a changenode we
1518 # If a 'missing' filenode thinks it belongs to a changenode we
1516 # assume the recipient must have, then the recipient must have
1519 # assume the recipient must have, then the recipient must have
1517 # that filenode.
1520 # that filenode.
1518 for n in msngset:
1521 for n in msngset:
1519 clnode = cl.node(filerevlog.linkrev(n))
1522 clnode = cl.node(filerevlog.linkrev(n))
1520 if clnode in has_cl_set:
1523 if clnode in has_cl_set:
1521 hasset[n] = 1
1524 hasset[n] = 1
1522 prune_parents(filerevlog, hasset, msngset)
1525 prune_parents(filerevlog, hasset, msngset)
1523
1526
1524 # A function generator function that sets up the a context for the
1527 # A function generator function that sets up the a context for the
1525 # inner function.
1528 # inner function.
1526 def lookup_filenode_link_func(fname):
1529 def lookup_filenode_link_func(fname):
1527 msngset = msng_filenode_set[fname]
1530 msngset = msng_filenode_set[fname]
1528 # Lookup the changenode the filenode belongs to.
1531 # Lookup the changenode the filenode belongs to.
1529 def lookup_filenode_link(fnode):
1532 def lookup_filenode_link(fnode):
1530 return msngset[fnode]
1533 return msngset[fnode]
1531 return lookup_filenode_link
1534 return lookup_filenode_link
1532
1535
1533 # Now that we have all theses utility functions to help out and
1536 # Now that we have all theses utility functions to help out and
1534 # logically divide up the task, generate the group.
1537 # logically divide up the task, generate the group.
1535 def gengroup():
1538 def gengroup():
1536 # The set of changed files starts empty.
1539 # The set of changed files starts empty.
1537 changedfiles = {}
1540 changedfiles = {}
1538 # Create a changenode group generator that will call our functions
1541 # Create a changenode group generator that will call our functions
1539 # back to lookup the owning changenode and collect information.
1542 # back to lookup the owning changenode and collect information.
1540 group = cl.group(msng_cl_lst, identity,
1543 group = cl.group(msng_cl_lst, identity,
1541 manifest_and_file_collector(changedfiles))
1544 manifest_and_file_collector(changedfiles))
1542 for chnk in group:
1545 for chnk in group:
1543 yield chnk
1546 yield chnk
1544
1547
1545 # The list of manifests has been collected by the generator
1548 # The list of manifests has been collected by the generator
1546 # calling our functions back.
1549 # calling our functions back.
1547 prune_manifests()
1550 prune_manifests()
1548 msng_mnfst_lst = msng_mnfst_set.keys()
1551 msng_mnfst_lst = msng_mnfst_set.keys()
1549 # Sort the manifestnodes by revision number.
1552 # Sort the manifestnodes by revision number.
1550 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1553 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1551 # Create a generator for the manifestnodes that calls our lookup
1554 # Create a generator for the manifestnodes that calls our lookup
1552 # and data collection functions back.
1555 # and data collection functions back.
1553 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1556 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1554 filenode_collector(changedfiles))
1557 filenode_collector(changedfiles))
1555 for chnk in group:
1558 for chnk in group:
1556 yield chnk
1559 yield chnk
1557
1560
1558 # These are no longer needed, dereference and toss the memory for
1561 # These are no longer needed, dereference and toss the memory for
1559 # them.
1562 # them.
1560 msng_mnfst_lst = None
1563 msng_mnfst_lst = None
1561 msng_mnfst_set.clear()
1564 msng_mnfst_set.clear()
1562
1565
1563 changedfiles = changedfiles.keys()
1566 changedfiles = changedfiles.keys()
1564 changedfiles.sort()
1567 changedfiles.sort()
1565 # Go through all our files in order sorted by name.
1568 # Go through all our files in order sorted by name.
1566 for fname in changedfiles:
1569 for fname in changedfiles:
1567 filerevlog = self.file(fname)
1570 filerevlog = self.file(fname)
1568 # Toss out the filenodes that the recipient isn't really
1571 # Toss out the filenodes that the recipient isn't really
1569 # missing.
1572 # missing.
1570 if msng_filenode_set.has_key(fname):
1573 if msng_filenode_set.has_key(fname):
1571 prune_filenodes(fname, filerevlog)
1574 prune_filenodes(fname, filerevlog)
1572 msng_filenode_lst = msng_filenode_set[fname].keys()
1575 msng_filenode_lst = msng_filenode_set[fname].keys()
1573 else:
1576 else:
1574 msng_filenode_lst = []
1577 msng_filenode_lst = []
1575 # If any filenodes are left, generate the group for them,
1578 # If any filenodes are left, generate the group for them,
1576 # otherwise don't bother.
1579 # otherwise don't bother.
1577 if len(msng_filenode_lst) > 0:
1580 if len(msng_filenode_lst) > 0:
1578 yield changegroup.genchunk(fname)
1581 yield changegroup.genchunk(fname)
1579 # Sort the filenodes by their revision #
1582 # Sort the filenodes by their revision #
1580 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1583 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1581 # Create a group generator and only pass in a changenode
1584 # Create a group generator and only pass in a changenode
1582 # lookup function as we need to collect no information
1585 # lookup function as we need to collect no information
1583 # from filenodes.
1586 # from filenodes.
1584 group = filerevlog.group(msng_filenode_lst,
1587 group = filerevlog.group(msng_filenode_lst,
1585 lookup_filenode_link_func(fname))
1588 lookup_filenode_link_func(fname))
1586 for chnk in group:
1589 for chnk in group:
1587 yield chnk
1590 yield chnk
1588 if msng_filenode_set.has_key(fname):
1591 if msng_filenode_set.has_key(fname):
1589 # Don't need this anymore, toss it to free memory.
1592 # Don't need this anymore, toss it to free memory.
1590 del msng_filenode_set[fname]
1593 del msng_filenode_set[fname]
1591 # Signal that no more groups are left.
1594 # Signal that no more groups are left.
1592 yield changegroup.closechunk()
1595 yield changegroup.closechunk()
1593
1596
1594 if msng_cl_lst:
1597 if msng_cl_lst:
1595 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1598 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1596
1599
1597 return util.chunkbuffer(gengroup())
1600 return util.chunkbuffer(gengroup())
1598
1601
1599 def changegroup(self, basenodes, source):
1602 def changegroup(self, basenodes, source):
1600 """Generate a changegroup of all nodes that we have that a recipient
1603 """Generate a changegroup of all nodes that we have that a recipient
1601 doesn't.
1604 doesn't.
1602
1605
1603 This is much easier than the previous function as we can assume that
1606 This is much easier than the previous function as we can assume that
1604 the recipient has any changenode we aren't sending them."""
1607 the recipient has any changenode we aren't sending them."""
1605
1608
1606 self.hook('preoutgoing', throw=True, source=source)
1609 self.hook('preoutgoing', throw=True, source=source)
1607
1610
1608 cl = self.changelog
1611 cl = self.changelog
1609 nodes = cl.nodesbetween(basenodes, None)[0]
1612 nodes = cl.nodesbetween(basenodes, None)[0]
1610 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1613 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1611
1614
1612 def identity(x):
1615 def identity(x):
1613 return x
1616 return x
1614
1617
1615 def gennodelst(revlog):
1618 def gennodelst(revlog):
1616 for r in xrange(0, revlog.count()):
1619 for r in xrange(0, revlog.count()):
1617 n = revlog.node(r)
1620 n = revlog.node(r)
1618 if revlog.linkrev(n) in revset:
1621 if revlog.linkrev(n) in revset:
1619 yield n
1622 yield n
1620
1623
1621 def changed_file_collector(changedfileset):
1624 def changed_file_collector(changedfileset):
1622 def collect_changed_files(clnode):
1625 def collect_changed_files(clnode):
1623 c = cl.read(clnode)
1626 c = cl.read(clnode)
1624 for fname in c[3]:
1627 for fname in c[3]:
1625 changedfileset[fname] = 1
1628 changedfileset[fname] = 1
1626 return collect_changed_files
1629 return collect_changed_files
1627
1630
1628 def lookuprevlink_func(revlog):
1631 def lookuprevlink_func(revlog):
1629 def lookuprevlink(n):
1632 def lookuprevlink(n):
1630 return cl.node(revlog.linkrev(n))
1633 return cl.node(revlog.linkrev(n))
1631 return lookuprevlink
1634 return lookuprevlink
1632
1635
1633 def gengroup():
1636 def gengroup():
1634 # construct a list of all changed files
1637 # construct a list of all changed files
1635 changedfiles = {}
1638 changedfiles = {}
1636
1639
1637 for chnk in cl.group(nodes, identity,
1640 for chnk in cl.group(nodes, identity,
1638 changed_file_collector(changedfiles)):
1641 changed_file_collector(changedfiles)):
1639 yield chnk
1642 yield chnk
1640 changedfiles = changedfiles.keys()
1643 changedfiles = changedfiles.keys()
1641 changedfiles.sort()
1644 changedfiles.sort()
1642
1645
1643 mnfst = self.manifest
1646 mnfst = self.manifest
1644 nodeiter = gennodelst(mnfst)
1647 nodeiter = gennodelst(mnfst)
1645 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1648 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1646 yield chnk
1649 yield chnk
1647
1650
1648 for fname in changedfiles:
1651 for fname in changedfiles:
1649 filerevlog = self.file(fname)
1652 filerevlog = self.file(fname)
1650 nodeiter = gennodelst(filerevlog)
1653 nodeiter = gennodelst(filerevlog)
1651 nodeiter = list(nodeiter)
1654 nodeiter = list(nodeiter)
1652 if nodeiter:
1655 if nodeiter:
1653 yield changegroup.genchunk(fname)
1656 yield changegroup.genchunk(fname)
1654 lookup = lookuprevlink_func(filerevlog)
1657 lookup = lookuprevlink_func(filerevlog)
1655 for chnk in filerevlog.group(nodeiter, lookup):
1658 for chnk in filerevlog.group(nodeiter, lookup):
1656 yield chnk
1659 yield chnk
1657
1660
1658 yield changegroup.closechunk()
1661 yield changegroup.closechunk()
1659
1662
1660 if nodes:
1663 if nodes:
1661 self.hook('outgoing', node=hex(nodes[0]), source=source)
1664 self.hook('outgoing', node=hex(nodes[0]), source=source)
1662
1665
1663 return util.chunkbuffer(gengroup())
1666 return util.chunkbuffer(gengroup())
1664
1667
1665 def addchangegroup(self, source, srctype, url):
1668 def addchangegroup(self, source, srctype, url):
1666 """add changegroup to repo.
1669 """add changegroup to repo.
1667 returns number of heads modified or added + 1."""
1670 returns number of heads modified or added + 1."""
1668
1671
1669 def csmap(x):
1672 def csmap(x):
1670 self.ui.debug(_("add changeset %s\n") % short(x))
1673 self.ui.debug(_("add changeset %s\n") % short(x))
1671 return cl.count()
1674 return cl.count()
1672
1675
1673 def revmap(x):
1676 def revmap(x):
1674 return cl.rev(x)
1677 return cl.rev(x)
1675
1678
1676 if not source:
1679 if not source:
1677 return 0
1680 return 0
1678
1681
1679 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1682 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1680
1683
1681 changesets = files = revisions = 0
1684 changesets = files = revisions = 0
1682
1685
1683 tr = self.transaction()
1686 tr = self.transaction()
1684
1687
1685 # write changelog data to temp files so concurrent readers will not see
1688 # write changelog data to temp files so concurrent readers will not see
1686 # inconsistent view
1689 # inconsistent view
1687 cl = None
1690 cl = None
1688 try:
1691 try:
1689 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1692 cl = appendfile.appendchangelog(self.sopener,
1693 self.changelog.version)
1690
1694
1691 oldheads = len(cl.heads())
1695 oldheads = len(cl.heads())
1692
1696
1693 # pull off the changeset group
1697 # pull off the changeset group
1694 self.ui.status(_("adding changesets\n"))
1698 self.ui.status(_("adding changesets\n"))
1695 cor = cl.count() - 1
1699 cor = cl.count() - 1
1696 chunkiter = changegroup.chunkiter(source)
1700 chunkiter = changegroup.chunkiter(source)
1697 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1701 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1698 raise util.Abort(_("received changelog group is empty"))
1702 raise util.Abort(_("received changelog group is empty"))
1699 cnr = cl.count() - 1
1703 cnr = cl.count() - 1
1700 changesets = cnr - cor
1704 changesets = cnr - cor
1701
1705
1702 # pull off the manifest group
1706 # pull off the manifest group
1703 self.ui.status(_("adding manifests\n"))
1707 self.ui.status(_("adding manifests\n"))
1704 chunkiter = changegroup.chunkiter(source)
1708 chunkiter = changegroup.chunkiter(source)
1705 # no need to check for empty manifest group here:
1709 # no need to check for empty manifest group here:
1706 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1710 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1707 # no new manifest will be created and the manifest group will
1711 # no new manifest will be created and the manifest group will
1708 # be empty during the pull
1712 # be empty during the pull
1709 self.manifest.addgroup(chunkiter, revmap, tr)
1713 self.manifest.addgroup(chunkiter, revmap, tr)
1710
1714
1711 # process the files
1715 # process the files
1712 self.ui.status(_("adding file changes\n"))
1716 self.ui.status(_("adding file changes\n"))
1713 while 1:
1717 while 1:
1714 f = changegroup.getchunk(source)
1718 f = changegroup.getchunk(source)
1715 if not f:
1719 if not f:
1716 break
1720 break
1717 self.ui.debug(_("adding %s revisions\n") % f)
1721 self.ui.debug(_("adding %s revisions\n") % f)
1718 fl = self.file(f)
1722 fl = self.file(f)
1719 o = fl.count()
1723 o = fl.count()
1720 chunkiter = changegroup.chunkiter(source)
1724 chunkiter = changegroup.chunkiter(source)
1721 if fl.addgroup(chunkiter, revmap, tr) is None:
1725 if fl.addgroup(chunkiter, revmap, tr) is None:
1722 raise util.Abort(_("received file revlog group is empty"))
1726 raise util.Abort(_("received file revlog group is empty"))
1723 revisions += fl.count() - o
1727 revisions += fl.count() - o
1724 files += 1
1728 files += 1
1725
1729
1726 cl.writedata()
1730 cl.writedata()
1727 finally:
1731 finally:
1728 if cl:
1732 if cl:
1729 cl.cleanup()
1733 cl.cleanup()
1730
1734
1731 # make changelog see real files again
1735 # make changelog see real files again
1732 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1736 self.changelog = changelog.changelog(self.sopener,
1737 self.changelog.version)
1733 self.changelog.checkinlinesize(tr)
1738 self.changelog.checkinlinesize(tr)
1734
1739
1735 newheads = len(self.changelog.heads())
1740 newheads = len(self.changelog.heads())
1736 heads = ""
1741 heads = ""
1737 if oldheads and newheads != oldheads:
1742 if oldheads and newheads != oldheads:
1738 heads = _(" (%+d heads)") % (newheads - oldheads)
1743 heads = _(" (%+d heads)") % (newheads - oldheads)
1739
1744
1740 self.ui.status(_("added %d changesets"
1745 self.ui.status(_("added %d changesets"
1741 " with %d changes to %d files%s\n")
1746 " with %d changes to %d files%s\n")
1742 % (changesets, revisions, files, heads))
1747 % (changesets, revisions, files, heads))
1743
1748
1744 if changesets > 0:
1749 if changesets > 0:
1745 self.hook('pretxnchangegroup', throw=True,
1750 self.hook('pretxnchangegroup', throw=True,
1746 node=hex(self.changelog.node(cor+1)), source=srctype,
1751 node=hex(self.changelog.node(cor+1)), source=srctype,
1747 url=url)
1752 url=url)
1748
1753
1749 tr.close()
1754 tr.close()
1750
1755
1751 if changesets > 0:
1756 if changesets > 0:
1752 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1757 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1753 source=srctype, url=url)
1758 source=srctype, url=url)
1754
1759
1755 for i in range(cor + 1, cnr + 1):
1760 for i in range(cor + 1, cnr + 1):
1756 self.hook("incoming", node=hex(self.changelog.node(i)),
1761 self.hook("incoming", node=hex(self.changelog.node(i)),
1757 source=srctype, url=url)
1762 source=srctype, url=url)
1758
1763
1759 return newheads - oldheads + 1
1764 return newheads - oldheads + 1
1760
1765
1761
1766
1762 def stream_in(self, remote):
1767 def stream_in(self, remote):
1763 fp = remote.stream_out()
1768 fp = remote.stream_out()
1764 resp = int(fp.readline())
1769 resp = int(fp.readline())
1765 if resp != 0:
1770 if resp != 0:
1766 raise util.Abort(_('operation forbidden by server'))
1771 raise util.Abort(_('operation forbidden by server'))
1767 self.ui.status(_('streaming all changes\n'))
1772 self.ui.status(_('streaming all changes\n'))
1768 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1773 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1769 self.ui.status(_('%d files to transfer, %s of data\n') %
1774 self.ui.status(_('%d files to transfer, %s of data\n') %
1770 (total_files, util.bytecount(total_bytes)))
1775 (total_files, util.bytecount(total_bytes)))
1771 start = time.time()
1776 start = time.time()
1772 for i in xrange(total_files):
1777 for i in xrange(total_files):
1773 name, size = fp.readline().split('\0', 1)
1778 name, size = fp.readline().split('\0', 1)
1774 size = int(size)
1779 size = int(size)
1775 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1780 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1776 ofp = self.opener(name, 'w')
1781 ofp = self.sopener(name, 'w')
1777 for chunk in util.filechunkiter(fp, limit=size):
1782 for chunk in util.filechunkiter(fp, limit=size):
1778 ofp.write(chunk)
1783 ofp.write(chunk)
1779 ofp.close()
1784 ofp.close()
1780 elapsed = time.time() - start
1785 elapsed = time.time() - start
1781 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1786 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1782 (util.bytecount(total_bytes), elapsed,
1787 (util.bytecount(total_bytes), elapsed,
1783 util.bytecount(total_bytes / elapsed)))
1788 util.bytecount(total_bytes / elapsed)))
1784 self.reload()
1789 self.reload()
1785 return len(self.heads()) + 1
1790 return len(self.heads()) + 1
1786
1791
1787 def clone(self, remote, heads=[], stream=False):
1792 def clone(self, remote, heads=[], stream=False):
1788 '''clone remote repository.
1793 '''clone remote repository.
1789
1794
1790 keyword arguments:
1795 keyword arguments:
1791 heads: list of revs to clone (forces use of pull)
1796 heads: list of revs to clone (forces use of pull)
1792 stream: use streaming clone if possible'''
1797 stream: use streaming clone if possible'''
1793
1798
1794 # now, all clients that can request uncompressed clones can
1799 # now, all clients that can request uncompressed clones can
1795 # read repo formats supported by all servers that can serve
1800 # read repo formats supported by all servers that can serve
1796 # them.
1801 # them.
1797
1802
1798 # if revlog format changes, client will have to check version
1803 # if revlog format changes, client will have to check version
1799 # and format flags on "stream" capability, and use
1804 # and format flags on "stream" capability, and use
1800 # uncompressed only if compatible.
1805 # uncompressed only if compatible.
1801
1806
1802 if stream and not heads and remote.capable('stream'):
1807 if stream and not heads and remote.capable('stream'):
1803 return self.stream_in(remote)
1808 return self.stream_in(remote)
1804 return self.pull(remote, heads)
1809 return self.pull(remote, heads)
1805
1810
1806 # used to avoid circular references so destructors work
1811 # used to avoid circular references so destructors work
1807 def aftertrans(base):
1812 def aftertrans(base):
1808 p = base
1813 p = base
1809 def a():
1814 def a():
1810 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1815 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1811 util.rename(os.path.join(p, "journal.dirstate"),
1816 util.rename(os.path.join(p, "journal.dirstate"),
1812 os.path.join(p, "undo.dirstate"))
1817 os.path.join(p, "undo.dirstate"))
1813 return a
1818 return a
1814
1819
1815 def instance(ui, path, create):
1820 def instance(ui, path, create):
1816 return localrepository(ui, util.drop_scheme('file', path), create)
1821 return localrepository(ui, util.drop_scheme('file', path), create)
1817
1822
1818 def islocal(path):
1823 def islocal(path):
1819 return True
1824 return True
@@ -1,64 +1,65 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms
7 # This software may be used and distributed according to the terms
8 # of the GNU General Public License, incorporated herein by reference.
8 # of the GNU General Public License, incorporated herein by reference.
9
9
10 from demandload import *
10 from demandload import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 demandload(globals(), "changelog filelog httprangereader")
12 demandload(globals(), "changelog filelog httprangereader")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14
14
15 class rangereader(httprangereader.httprangereader):
15 class rangereader(httprangereader.httprangereader):
16 def read(self, size=None):
16 def read(self, size=None):
17 try:
17 try:
18 return httprangereader.httprangereader.read(self, size)
18 return httprangereader.httprangereader.read(self, size)
19 except urllib2.HTTPError, inst:
19 except urllib2.HTTPError, inst:
20 raise IOError(None, inst)
20 raise IOError(None, inst)
21 except urllib2.URLError, inst:
21 except urllib2.URLError, inst:
22 raise IOError(None, inst.reason[1])
22 raise IOError(None, inst.reason[1])
23
23
24 def opener(base):
24 def opener(base):
25 """return a function that opens files over http"""
25 """return a function that opens files over http"""
26 p = base
26 p = base
27 def o(path, mode="r"):
27 def o(path, mode="r"):
28 f = os.path.join(p, urllib.quote(path))
28 f = os.path.join(p, urllib.quote(path))
29 return rangereader(f)
29 return rangereader(f)
30 return o
30 return o
31
31
32 class statichttprepository(localrepo.localrepository):
32 class statichttprepository(localrepo.localrepository):
33 def __init__(self, ui, path):
33 def __init__(self, ui, path):
34 self._url = path
34 self._url = path
35 self.path = (path + "/.hg")
35 self.path = (path + "/.hg")
36 self.ui = ui
36 self.ui = ui
37 self.revlogversion = 0
37 self.revlogversion = 0
38 self.opener = opener(self.path)
38 self.opener = opener(self.path)
39 self.sopener = opener(self.path)
39 self.manifest = manifest.manifest(self.opener)
40 self.manifest = manifest.manifest(self.opener)
40 self.changelog = changelog.changelog(self.opener)
41 self.changelog = changelog.changelog(self.opener)
41 self.tagscache = None
42 self.tagscache = None
42 self.nodetagscache = None
43 self.nodetagscache = None
43 self.encodepats = None
44 self.encodepats = None
44 self.decodepats = None
45 self.decodepats = None
45
46
46 def url(self):
47 def url(self):
47 return 'static-' + self._url
48 return 'static-' + self._url
48
49
49 def dev(self):
50 def dev(self):
50 return -1
51 return -1
51
52
52 def local(self):
53 def local(self):
53 return False
54 return False
54
55
55 def instance(ui, path, create):
56 def instance(ui, path, create):
56 if create:
57 if create:
57 raise util.Abort(_('cannot create new static-http repository'))
58 raise util.Abort(_('cannot create new static-http repository'))
58 if path.startswith('old-http:'):
59 if path.startswith('old-http:'):
59 ui.warn(_("old-http:// syntax is deprecated, "
60 ui.warn(_("old-http:// syntax is deprecated, "
60 "please use static-http:// instead\n"))
61 "please use static-http:// instead\n"))
61 path = path[4:]
62 path = path[4:]
62 else:
63 else:
63 path = path[7:]
64 path = path[7:]
64 return statichttprepository(ui, path)
65 return statichttprepository(ui, path)
General Comments 0
You need to be logged in to leave comments. Login now