##// END OF EJS Templates
introduce store classes...
Adrian Buehlmann -
r6840:80e51429 default
parent child Browse files
Show More
@@ -1,2076 +1,2074
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store
12 import lock, transaction, stat, errno, ui, store
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15
15
16 class localrepository(repo.repository):
16 class localrepository(repo.repository):
# wire-protocol capabilities advertised by local repositories
capabilities = util.set(('lookup', 'changegroupsubset'))
# repository format requirements this class knows how to open
supported = ('revlogv1', 'store')
19
19
def __init__(self, parentui, path=None, create=0):
    """Open the repository at path, or initialize it when create is true."""
    repo.repository.__init__(self)
    self.root = os.path.realpath(path)
    self.path = os.path.join(self.root, ".hg")
    self.origroot = path
    self.opener = util.opener(self.path)
    self.wopener = util.opener(self.root)

    if not os.path.isdir(self.path):
        if create:
            if not os.path.exists(path):
                os.mkdir(path)
            os.mkdir(self.path)
            requirements = ["revlogv1"]
            if parentui.configbool('format', 'usestore', True):
                os.mkdir(os.path.join(self.path, "store"))
                requirements.append("store")
            # create an invalid changelog so clients predating the
            # "requires" file fail loudly instead of misreading the layout
            self.opener("00changelog.i", "a").write(
                '\0\0\0\2' # represents revlogv2
                ' dummy changelog to prevent using the old repo layout'
            )
            reqfile = self.opener("requires", "w")
            for r in requirements:
                reqfile.write("%s\n" % r)
            reqfile.close()
        else:
            raise repo.RepoError(_("repository %s not found") % path)
    elif create:
        raise repo.RepoError(_("repository %s already exists") % path)
    else:
        # existing repository: read its requirements
        try:
            requirements = self.opener("requires").read().splitlines()
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = []
        # refuse to open formats we do not understand
        for r in requirements:
            if r not in self.supported:
                raise repo.RepoError(_("requirement '%s' not supported") % r)

    # the store object encapsulates layout/encoding of .hg/store
    self.store = store.store(requirements, self.path)

    self.spath = self.store.path
    self.sopener = self.store.opener
    self.sjoin = self.store.join
    self._createmode = self.store.createmode
    self.opener.createmode = self.store.createmode

    self.ui = ui.ui(parentui=parentui)
    try:
        self.ui.readconfig(self.join("hgrc"), self.root)
        extensions.loadall(self.ui)
    except IOError:
        pass

    # lazily-populated caches
    self.tagscache = None
    self._tagstypecache = None
    self.branchcache = None
    self._ubranchcache = None # UTF-8 version of branchcache
    self._branchcachetip = None
    self.nodetagscache = None
    self.filterpats = {}
    self._datafilters = {}
    self._transref = self._lockref = self._wlockref = None
104
87
def __getattr__(self, name):
    """Instantiate the expensive changelog/manifest/dirstate attributes
    lazily, on first access."""
    if name == 'changelog':
        self.changelog = changelog.changelog(self.sopener)
        self.sopener.defversion = self.changelog.version
        return self.changelog
    if name == 'manifest':
        # touch the changelog first (presumably so sopener.defversion is
        # set before the manifest revlog is opened -- TODO confirm)
        self.changelog
        self.manifest = manifest.manifest(self.sopener)
        return self.manifest
    if name == 'dirstate':
        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        return self.dirstate
    raise AttributeError(name)
119
102
def __getitem__(self, changeid):
    """repo[None] -> working directory context; otherwise a changectx."""
    if changeid == None:
        return context.workingctx(self)
    return context.changectx(self, changeid)
124
107
def __nonzero__(self):
    # a repository object is always truthy, even when empty
    return True
127
110
def __len__(self):
    # number of revisions is the length of the changelog
    return len(self.changelog)
130
113
def __iter__(self):
    # yield every revision number, in order
    for rev in xrange(len(self)):
        yield rev
134
117
def url(self):
    """Return the URL of this repository (always a file: URL)."""
    return 'file:' + self.root
137
120
def hook(self, name, throw=False, **args):
    """Run the named hook via the hook module; throw makes failures abort."""
    return hook.hook(self.ui, self, name, throw, **args)
140
123
# characters that may not appear in a tag name
tag_disallowed = ':\r\n'
142
125
def _tag(self, names, node, message, local, user, date, parent=None,
         extra={}):
    """Record one or more tags for node.

    names may be a single string or a sequence of strings.  Local tags
    are appended to .hg/localtags; global tags are written to .hgtags
    and committed (on top of parent when given, otherwise on the
    dirstate parents).  Returns the tagging changeset node, or None for
    local tags.
    """
    use_dirstate = parent is None

    if isinstance(names, str):
        allchars = names
        names = (names,)
    else:
        allchars = ''.join(names)
    for c in self.tag_disallowed:
        if c in allchars:
            raise util.Abort(_('%r cannot be used in a tag name') % c)

    for name in names:
        self.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)

    def writetags(fp, names, munge, prevtags):
        # append entries, making sure existing content ends in a newline
        fp.seek(0, 2)
        if prevtags and prevtags[-1] != '\n':
            fp.write('\n')
        for name in names:
            m = munge and munge(name) or name
            if self._tagstypecache and name in self._tagstypecache:
                # re-record the previous node so the new entry supersedes it
                old = self.tagscache.get(name, nullid)
                fp.write('%s %s\n' % (hex(old), m))
            fp.write('%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = self.opener('localtags', 'r+')
        except IOError:
            fp = self.opener('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)
        return

    if use_dirstate:
        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()
    else:
        try:
            prevtags = self.filectx('.hgtags', parent).data()
        except revlog.LookupError:
            pass
        fp = self.wfile('.hgtags', 'wb')
        if prevtags:
            fp.write(prevtags)

    # committed tags are stored in UTF-8
    writetags(fp, names, util.fromlocal, prevtags)

    if use_dirstate and '.hgtags' not in self.dirstate:
        self.add(['.hgtags'])

    tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                          extra=extra)

    for name in names:
        self.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
216
199
def tag(self, names, node, message, local, user, date):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    # refuse to tag while .hgtags itself has uncommitted modifications
    for x in self.status()[:5]:
        if '.hgtags' in x:
            raise util.Abort(_('working copy of .hgtags is changed '
                               '(please commit .hgtags manually)'))

    self._tag(names, node, message, local, user, date)
244
227
def tags(self):
    '''return a mapping of tag to node'''
    if self.tagscache:
        return self.tagscache

    globaltags = {}
    tagtypes = {}

    def readtags(lines, fn, tagtype):
        # merge one tags file into globaltags/tagtypes
        filetags = {}
        count = 0

        def warn(msg):
            self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

        for l in lines:
            count += 1
            if not l:
                continue
            s = l.split(" ", 1)
            if len(s) != 2:
                warn(_("cannot parse entry"))
                continue
            node, key = s
            key = util.tolocal(key.strip()) # stored in UTF-8
            try:
                bin_n = bin(node)
            except TypeError:
                warn(_("node '%s' is not well formed") % node)
                continue
            if bin_n not in self.changelog.nodemap:
                warn(_("tag '%s' refers to unknown node") % key)
                continue

            # keep the history of nodes this file recorded for the tag
            h = []
            if key in filetags:
                n, h = filetags[key]
                h.append(n)
            filetags[key] = (bin_n, h)

        for k, nh in filetags.items():
            if k not in globaltags:
                globaltags[k] = nh
                tagtypes[k] = tagtype
                continue

            # we prefer the global tag if:
            #  it supercedes us OR
            #  mutual supercedes and it has a higher rank
            # otherwise we win because we're tip-most
            an, ah = nh
            bn, bh = globaltags[k]
            if (bn != an and an in bh and
                (bn not in ah or len(bh) > len(ah))):
                an = bn
            ah.extend([n for n in bh if n not in ah])
            globaltags[k] = an, ah
            tagtypes[k] = tagtype

    # read the tags file from each head, ending with the tip
    f = None
    for rev, node, fnode in self._hgtagsnodes():
        f = (f and f.filectx(fnode) or
             self.filectx('.hgtags', fileid=fnode))
        readtags(f.data().splitlines(), f, "global")

    try:
        data = util.fromlocal(self.opener("localtags").read())
        # localtags are stored in the local character set
        # while the internal tag table is stored in UTF-8
        readtags(data.splitlines(), "localtags", "local")
    except IOError:
        pass

    self.tagscache = {}
    self._tagstypecache = {}
    for k, nh in globaltags.items():
        n = nh[0]
        if n != nullid:
            self.tagscache[k] = n
        self._tagstypecache[k] = tagtypes[k]
    # 'tip' always exists and always wins
    self.tagscache['tip'] = self.changelog.tip()
    return self.tagscache
328
311
def tagtype(self, tagname):
    '''
    return the type of the given tag. result can be:

    'local'  : a local tag
    'global' : a global tag
    None     : tag does not exist
    '''

    # make sure the tag caches are populated
    self.tags()

    return self._tagstypecache.get(tagname)
341
324
def _hgtagsnodes(self):
    """Return [(rev, node, fnode)] for heads carrying a .hgtags file,
    keeping only the newest head for each distinct .hgtags revision."""
    heads = self.heads()
    heads.reverse()
    last = {}
    ret = []
    for node in heads:
        c = self[node]
        rev = c.rev()
        try:
            fnode = c.filenode('.hgtags')
        except revlog.LookupError:
            # this head has no .hgtags file at all
            continue
        ret.append((rev, node, fnode))
        if fnode in last:
            # an older head had the same .hgtags revision; drop it
            ret[last[fnode]] = None
        last[fnode] = len(ret) - 1
    return [item for item in ret if item]
359
342
def tagslist(self):
    '''return a list of tags ordered by revision'''
    entries = []
    for tag, node in self.tags().items():
        try:
            rev = self.changelog.rev(node)
        except:
            rev = -2 # sort to the beginning of the list if unknown
        entries.append((rev, tag, node))
    return [(tag, node) for rev, tag, node in util.sort(entries)]
370
353
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self.nodetagscache:
        # build the reverse node -> [tags] mapping once
        cache = {}
        for t, n in self.tags().items():
            cache.setdefault(n, []).append(t)
        self.nodetagscache = cache
    return self.nodetagscache.get(node, [])
378
361
def _branchtags(self, partial, lrev):
    """Extend the branch cache partial (valid through lrev) to the tip,
    persisting it when it changed."""
    tiprev = len(self) - 1
    if lrev != tiprev:
        self._updatebranchcache(partial, lrev + 1, tiprev + 1)
        self._writebranchcache(partial, self.changelog.tip(), tiprev)
    return partial
386
369
def branchtags(self):
    """Return a mapping of branch name to tip-most node, cached against
    the current changelog tip."""
    tip = self.changelog.tip()
    if self.branchcache is not None and self._branchcachetip == tip:
        return self.branchcache

    oldtip = self._branchcachetip
    self._branchcachetip = tip
    if self.branchcache is None:
        self.branchcache = {} # avoid recursion in changectx
    else:
        self.branchcache.clear() # keep using the same dict
    if oldtip is None or oldtip not in self.changelog.nodemap:
        partial, last, lrev = self._readbranchcache()
    else:
        # incremental update from the previously cached tip
        lrev = self.changelog.rev(oldtip)
        partial = self._ubranchcache

    self._branchtags(partial, lrev)

    # the branch cache is stored on disk as UTF-8, but in the local
    # charset internally
    for k, v in partial.items():
        self.branchcache[util.tolocal(k)] = v
    self._ubranchcache = partial
    return self.branchcache
412
395
def _readbranchcache(self):
    """Parse .hg/branch.cache; return (branchmap, tipnode, tiprev), or
    an empty result when the file is missing or stale."""
    partial = {}
    try:
        f = self.opener("branch.cache")
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        return {}, nullid, nullrev

    try:
        # header line: "<tip hex> <tip rev>"
        last, lrev = lines.pop(0).split(" ", 1)
        last, lrev = bin(last), int(lrev)
        if lrev >= len(self) or self[lrev].node() != last:
            # invalidate the cache
            raise ValueError('invalidating branch cache (tip differs)')
        for l in lines:
            if not l:
                continue
            node, label = l.split(" ", 1)
            partial[label.strip()] = bin(node)
    except (KeyboardInterrupt, util.SignalInterrupt):
        raise
    except Exception as inst:
        # a corrupt cache is not fatal: report in debug mode and rebuild
        if self.ui.debugflag:
            self.ui.warn(str(inst), '\n')
        partial, last, lrev = {}, nullid, nullrev
    return partial, last, lrev
439
422
def _writebranchcache(self, branches, tip, tiprev):
    """Atomically write the branch map to .hg/branch.cache; failures
    are ignored because the cache is only an optimization."""
    try:
        f = self.opener("branch.cache", "w", atomictemp=True)
        f.write("%s %s\n" % (hex(tip), tiprev))
        for label, node in branches.iteritems():
            f.write("%s %s\n" % (hex(node), label))
        f.rename()
    except (IOError, OSError):
        pass
449
432
def _updatebranchcache(self, partial, start, end):
    # record the newest node of each branch seen in revisions [start, end)
    for r in xrange(start, end):
        c = self[r]
        partial[c.branch()] = c.node()
455
438
def lookup(self, key):
    """Resolve key ('.', 'null', rev number, full/partial node, tag or
    branch name) to a binary changelog node; raise RepoError otherwise."""
    if key == '.':
        return self.dirstate.parents()[0]
    elif key == 'null':
        return nullid
    n = self.changelog._match(key)
    if n:
        return n
    if key in self.tags():
        return self.tags()[key]
    if key in self.branchtags():
        return self.branchtags()[key]
    n = self.changelog._partialmatch(key)
    if n:
        return n
    try:
        # show binary nodes as hex in the error message
        if len(key) == 20:
            key = hex(key)
    except:
        pass
    raise repo.RepoError(_("unknown revision '%s'") % key)
477
460
def local(self):
    # this repository class is always local (cf. remote repo classes)
    return True
480
463
def join(self, f):
    """Join f onto the .hg directory path."""
    return os.path.join(self.path, f)
483
466
484 def sjoin(self, f):
485 f = self.encodefn(f)
486 return os.path.join(self.spath, f)
487
def wjoin(self, f):
    """Join f onto the working directory root."""
    return os.path.join(self.root, f)
490
469
def rjoin(self, f):
    """Join f (normalized via util.pconvert) onto the repo root."""
    return os.path.join(self.root, util.pconvert(f))
493
472
def file(self, f):
    """Return the filelog for tracked file f; a leading '/' is stripped."""
    if f[0] == '/':
        f = f[1:]
    return filelog.filelog(self.sopener, f)
498
477
def changectx(self, changeid):
    """Return the change context for changeid (delegates to __getitem__)."""
    return self[changeid]
501
480
def parents(self, changeid=None):
    '''get list of changectxs for parents of changeid'''
    return self[changeid].parents()
505
484
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)
510
489
def getcwd(self):
    """Return the current working directory, as seen by the dirstate."""
    return self.dirstate.getcwd()
513
492
def pathto(self, f, cwd=None):
    """Delegate path formatting of f (relative to cwd) to the dirstate."""
    return self.dirstate.pathto(f, cwd)
516
495
def wfile(self, f, mode='r'):
    """Open file f from the working directory."""
    return self.wopener(f, mode)
519
498
def _link(self, f):
    """Is working-directory file f a symbolic link?"""
    return os.path.islink(self.wjoin(f))
522
501
def _filter(self, filter, filename, data):
    """Run data through the filters configured in the given ui section
    (e.g. "encode"/"decode") whose pattern matches filename."""
    if filter not in self.filterpats:
        # compile the config section into (matcher, fn, args) triples
        compiled = []
        for pat, cmd in self.ui.configitems(filter):
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            fn = None
            params = cmd
            # a command starting with a registered data-filter name
            # dispatches to that filter instead of a shell command
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments
            if not inspect.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, **kwargs: oldfn(s, c)
            compiled.append((mf, fn, params))
        self.filterpats[filter] = compiled

    for mf, fn, cmd in self.filterpats[filter]:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
            break

    return data
551
530
def adddatafilter(self, name, filter):
    """Register a named data filter usable from [encode]/[decode] config."""
    self._datafilters[name] = filter
554
533
def wread(self, filename):
    """Read filename from the working directory (symlink target for
    links) and run it through the 'encode' filters."""
    if self._link(filename):
        data = os.readlink(self.wjoin(filename))
    else:
        data = self.wopener(filename, 'r').read()
    return self._filter("encode", filename, data)
561
540
def wwrite(self, filename, data, flags):
    """Write data through the 'decode' filters into the working
    directory and apply flags via util.set_flags."""
    data = self._filter("decode", filename, data)
    try:
        # remove any existing file first (e.g. a stale symlink)
        os.unlink(self.wjoin(filename))
    except OSError:
        pass
    self.wopener(filename, 'w').write(data)
    util.set_flags(self.wjoin(filename), flags)
570
549
def wwritedata(self, filename, data):
    """Return data run through the 'decode' filters, without writing it."""
    return self._filter("decode", filename, data)
573
552
def transaction(self):
    """Return a new transaction, nesting into the running one if any."""
    if self._transref and self._transref():
        return self._transref().nest()

    # abort here if the journal already exists
    if os.path.exists(self.sjoin("journal")):
        raise repo.RepoError(_("journal already exists - run hg recover"))

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)
    self.opener("journal.branch", "w").write(self.dirstate.branch())

    # journal files become undo files when the transaction closes
    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate")),
               (self.join("journal.branch"), self.join("undo.branch"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames),
                                 self._createmode)
    # hold only a weak reference so abandoned transactions can be
    # collected (and aborted) when callers drop them
    self._transref = weakref.ref(tr)
    return tr
599
578
600 def recover(self):
579 def recover(self):
601 l = self.lock()
580 l = self.lock()
602 try:
581 try:
603 if os.path.exists(self.sjoin("journal")):
582 if os.path.exists(self.sjoin("journal")):
604 self.ui.status(_("rolling back interrupted transaction\n"))
583 self.ui.status(_("rolling back interrupted transaction\n"))
605 transaction.rollback(self.sopener, self.sjoin("journal"))
584 transaction.rollback(self.sopener, self.sjoin("journal"))
606 self.invalidate()
585 self.invalidate()
607 return True
586 return True
608 else:
587 else:
609 self.ui.warn(_("no interrupted transaction available\n"))
588 self.ui.warn(_("no interrupted transaction available\n"))
610 return False
589 return False
611 finally:
590 finally:
612 del l
591 del l
613
592
614 def rollback(self):
593 def rollback(self):
615 wlock = lock = None
594 wlock = lock = None
616 try:
595 try:
617 wlock = self.wlock()
596 wlock = self.wlock()
618 lock = self.lock()
597 lock = self.lock()
619 if os.path.exists(self.sjoin("undo")):
598 if os.path.exists(self.sjoin("undo")):
620 self.ui.status(_("rolling back last transaction\n"))
599 self.ui.status(_("rolling back last transaction\n"))
621 transaction.rollback(self.sopener, self.sjoin("undo"))
600 transaction.rollback(self.sopener, self.sjoin("undo"))
622 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
601 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
623 try:
602 try:
624 branch = self.opener("undo.branch").read()
603 branch = self.opener("undo.branch").read()
625 self.dirstate.setbranch(branch)
604 self.dirstate.setbranch(branch)
626 except IOError:
605 except IOError:
627 self.ui.warn(_("Named branch could not be reset, "
606 self.ui.warn(_("Named branch could not be reset, "
628 "current branch still is: %s\n")
607 "current branch still is: %s\n")
629 % util.tolocal(self.dirstate.branch()))
608 % util.tolocal(self.dirstate.branch()))
630 self.invalidate()
609 self.invalidate()
631 self.dirstate.invalidate()
610 self.dirstate.invalidate()
632 else:
611 else:
633 self.ui.warn(_("no rollback information available\n"))
612 self.ui.warn(_("no rollback information available\n"))
634 finally:
613 finally:
635 del lock, wlock
614 del lock, wlock
636
615
637 def invalidate(self):
616 def invalidate(self):
638 for a in "changelog manifest".split():
617 for a in "changelog manifest".split():
639 if a in self.__dict__:
618 if a in self.__dict__:
640 delattr(self, a)
619 delattr(self, a)
641 self.tagscache = None
620 self.tagscache = None
642 self._tagstypecache = None
621 self._tagstypecache = None
643 self.nodetagscache = None
622 self.nodetagscache = None
644 self.branchcache = None
623 self.branchcache = None
645 self._ubranchcache = None
624 self._ubranchcache = None
646 self._branchcachetip = None
625 self._branchcachetip = None
647
626
648 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
627 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
649 try:
628 try:
650 l = lock.lock(lockname, 0, releasefn, desc=desc)
629 l = lock.lock(lockname, 0, releasefn, desc=desc)
651 except lock.LockHeld, inst:
630 except lock.LockHeld, inst:
652 if not wait:
631 if not wait:
653 raise
632 raise
654 self.ui.warn(_("waiting for lock on %s held by %r\n") %
633 self.ui.warn(_("waiting for lock on %s held by %r\n") %
655 (desc, inst.locker))
634 (desc, inst.locker))
656 # default to 600 seconds timeout
635 # default to 600 seconds timeout
657 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
636 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
658 releasefn, desc=desc)
637 releasefn, desc=desc)
659 if acquirefn:
638 if acquirefn:
660 acquirefn()
639 acquirefn()
661 return l
640 return l
662
641
663 def lock(self, wait=True):
642 def lock(self, wait=True):
664 if self._lockref and self._lockref():
643 if self._lockref and self._lockref():
665 return self._lockref()
644 return self._lockref()
666
645
667 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
646 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
668 _('repository %s') % self.origroot)
647 _('repository %s') % self.origroot)
669 self._lockref = weakref.ref(l)
648 self._lockref = weakref.ref(l)
670 return l
649 return l
671
650
672 def wlock(self, wait=True):
651 def wlock(self, wait=True):
673 if self._wlockref and self._wlockref():
652 if self._wlockref and self._wlockref():
674 return self._wlockref()
653 return self._wlockref()
675
654
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
655 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 self.dirstate.invalidate, _('working directory of %s') %
656 self.dirstate.invalidate, _('working directory of %s') %
678 self.origroot)
657 self.origroot)
679 self._wlockref = weakref.ref(l)
658 self._wlockref = weakref.ref(l)
680 return l
659 return l
681
660
682 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
661 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 """
662 """
684 commit an individual file as part of a larger transaction
663 commit an individual file as part of a larger transaction
685 """
664 """
686
665
687 fn = fctx.path()
666 fn = fctx.path()
688 t = fctx.data()
667 t = fctx.data()
689 fl = self.file(fn)
668 fl = self.file(fn)
690 fp1 = manifest1.get(fn, nullid)
669 fp1 = manifest1.get(fn, nullid)
691 fp2 = manifest2.get(fn, nullid)
670 fp2 = manifest2.get(fn, nullid)
692
671
693 meta = {}
672 meta = {}
694 cp = fctx.renamed()
673 cp = fctx.renamed()
695 if cp and cp[0] != fn:
674 if cp and cp[0] != fn:
696 cp = cp[0]
675 cp = cp[0]
697 # Mark the new revision of this file as a copy of another
676 # Mark the new revision of this file as a copy of another
698 # file. This copy data will effectively act as a parent
677 # file. This copy data will effectively act as a parent
699 # of this new revision. If this is a merge, the first
678 # of this new revision. If this is a merge, the first
700 # parent will be the nullid (meaning "look up the copy data")
679 # parent will be the nullid (meaning "look up the copy data")
701 # and the second one will be the other parent. For example:
680 # and the second one will be the other parent. For example:
702 #
681 #
703 # 0 --- 1 --- 3 rev1 changes file foo
682 # 0 --- 1 --- 3 rev1 changes file foo
704 # \ / rev2 renames foo to bar and changes it
683 # \ / rev2 renames foo to bar and changes it
705 # \- 2 -/ rev3 should have bar with all changes and
684 # \- 2 -/ rev3 should have bar with all changes and
706 # should record that bar descends from
685 # should record that bar descends from
707 # bar in rev2 and foo in rev1
686 # bar in rev2 and foo in rev1
708 #
687 #
709 # this allows this merge to succeed:
688 # this allows this merge to succeed:
710 #
689 #
711 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
690 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
712 # \ / merging rev3 and rev4 should use bar@rev2
691 # \ / merging rev3 and rev4 should use bar@rev2
713 # \- 2 --- 4 as the merge base
692 # \- 2 --- 4 as the merge base
714 #
693 #
715 meta["copy"] = cp
694 meta["copy"] = cp
716 if not manifest2: # not a branch merge
695 if not manifest2: # not a branch merge
717 meta["copyrev"] = hex(manifest1[cp])
696 meta["copyrev"] = hex(manifest1[cp])
718 fp2 = nullid
697 fp2 = nullid
719 elif fp2 != nullid: # copied on remote side
698 elif fp2 != nullid: # copied on remote side
720 meta["copyrev"] = hex(manifest1[cp])
699 meta["copyrev"] = hex(manifest1[cp])
721 elif fp1 != nullid: # copied on local side, reversed
700 elif fp1 != nullid: # copied on local side, reversed
722 meta["copyrev"] = hex(manifest2[cp])
701 meta["copyrev"] = hex(manifest2[cp])
723 fp2 = fp1
702 fp2 = fp1
724 elif cp in manifest2: # directory rename on local side
703 elif cp in manifest2: # directory rename on local side
725 meta["copyrev"] = hex(manifest2[cp])
704 meta["copyrev"] = hex(manifest2[cp])
726 else: # directory rename on remote side
705 else: # directory rename on remote side
727 meta["copyrev"] = hex(manifest1[cp])
706 meta["copyrev"] = hex(manifest1[cp])
728 self.ui.debug(_(" %s: copy %s:%s\n") %
707 self.ui.debug(_(" %s: copy %s:%s\n") %
729 (fn, cp, meta["copyrev"]))
708 (fn, cp, meta["copyrev"]))
730 fp1 = nullid
709 fp1 = nullid
731 elif fp2 != nullid:
710 elif fp2 != nullid:
732 # is one parent an ancestor of the other?
711 # is one parent an ancestor of the other?
733 fpa = fl.ancestor(fp1, fp2)
712 fpa = fl.ancestor(fp1, fp2)
734 if fpa == fp1:
713 if fpa == fp1:
735 fp1, fp2 = fp2, nullid
714 fp1, fp2 = fp2, nullid
736 elif fpa == fp2:
715 elif fpa == fp2:
737 fp2 = nullid
716 fp2 = nullid
738
717
739 # is the file unmodified from the parent? report existing entry
718 # is the file unmodified from the parent? report existing entry
740 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
719 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
741 return fp1
720 return fp1
742
721
743 changelist.append(fn)
722 changelist.append(fn)
744 return fl.add(t, meta, tr, linkrev, fp1, fp2)
723 return fl.add(t, meta, tr, linkrev, fp1, fp2)
745
724
746 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
725 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
747 if p1 is None:
726 if p1 is None:
748 p1, p2 = self.dirstate.parents()
727 p1, p2 = self.dirstate.parents()
749 return self.commit(files=files, text=text, user=user, date=date,
728 return self.commit(files=files, text=text, user=user, date=date,
750 p1=p1, p2=p2, extra=extra, empty_ok=True)
729 p1=p1, p2=p2, extra=extra, empty_ok=True)
751
730
752 def commit(self, files=None, text="", user=None, date=None,
731 def commit(self, files=None, text="", user=None, date=None,
753 match=None, force=False, force_editor=False,
732 match=None, force=False, force_editor=False,
754 p1=None, p2=None, extra={}, empty_ok=False):
733 p1=None, p2=None, extra={}, empty_ok=False):
755 wlock = lock = None
734 wlock = lock = None
756 if files:
735 if files:
757 files = util.unique(files)
736 files = util.unique(files)
758 try:
737 try:
759 wlock = self.wlock()
738 wlock = self.wlock()
760 lock = self.lock()
739 lock = self.lock()
761 use_dirstate = (p1 is None) # not rawcommit
740 use_dirstate = (p1 is None) # not rawcommit
762
741
763 if use_dirstate:
742 if use_dirstate:
764 p1, p2 = self.dirstate.parents()
743 p1, p2 = self.dirstate.parents()
765 update_dirstate = True
744 update_dirstate = True
766
745
767 if (not force and p2 != nullid and
746 if (not force and p2 != nullid and
768 (match and (match.files() or match.anypats()))):
747 (match and (match.files() or match.anypats()))):
769 raise util.Abort(_('cannot partially commit a merge '
748 raise util.Abort(_('cannot partially commit a merge '
770 '(do not specify files or patterns)'))
749 '(do not specify files or patterns)'))
771
750
772 if files:
751 if files:
773 modified, removed = [], []
752 modified, removed = [], []
774 for f in files:
753 for f in files:
775 s = self.dirstate[f]
754 s = self.dirstate[f]
776 if s in 'nma':
755 if s in 'nma':
777 modified.append(f)
756 modified.append(f)
778 elif s == 'r':
757 elif s == 'r':
779 removed.append(f)
758 removed.append(f)
780 else:
759 else:
781 self.ui.warn(_("%s not tracked!\n") % f)
760 self.ui.warn(_("%s not tracked!\n") % f)
782 changes = [modified, [], removed, [], []]
761 changes = [modified, [], removed, [], []]
783 else:
762 else:
784 changes = self.status(match=match)
763 changes = self.status(match=match)
785 else:
764 else:
786 p1, p2 = p1, p2 or nullid
765 p1, p2 = p1, p2 or nullid
787 update_dirstate = (self.dirstate.parents()[0] == p1)
766 update_dirstate = (self.dirstate.parents()[0] == p1)
788 changes = [files, [], [], [], []]
767 changes = [files, [], [], [], []]
789
768
790 wctx = context.workingctx(self, (p1, p2), text, user, date,
769 wctx = context.workingctx(self, (p1, p2), text, user, date,
791 extra, changes)
770 extra, changes)
792 return self._commitctx(wctx, force, force_editor, empty_ok,
771 return self._commitctx(wctx, force, force_editor, empty_ok,
793 use_dirstate, update_dirstate)
772 use_dirstate, update_dirstate)
794 finally:
773 finally:
795 del lock, wlock
774 del lock, wlock
796
775
797 def commitctx(self, ctx):
776 def commitctx(self, ctx):
798 wlock = lock = None
777 wlock = lock = None
799 try:
778 try:
800 wlock = self.wlock()
779 wlock = self.wlock()
801 lock = self.lock()
780 lock = self.lock()
802 return self._commitctx(ctx, force=True, force_editor=False,
781 return self._commitctx(ctx, force=True, force_editor=False,
803 empty_ok=True, use_dirstate=False,
782 empty_ok=True, use_dirstate=False,
804 update_dirstate=False)
783 update_dirstate=False)
805 finally:
784 finally:
806 del lock, wlock
785 del lock, wlock
807
786
808 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
787 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
809 use_dirstate=True, update_dirstate=True):
788 use_dirstate=True, update_dirstate=True):
810 tr = None
789 tr = None
811 valid = 0 # don't save the dirstate if this isn't set
790 valid = 0 # don't save the dirstate if this isn't set
812 try:
791 try:
813 commit = util.sort(wctx.modified() + wctx.added())
792 commit = util.sort(wctx.modified() + wctx.added())
814 remove = wctx.removed()
793 remove = wctx.removed()
815 extra = wctx.extra().copy()
794 extra = wctx.extra().copy()
816 branchname = extra['branch']
795 branchname = extra['branch']
817 user = wctx.user()
796 user = wctx.user()
818 text = wctx.description()
797 text = wctx.description()
819
798
820 p1, p2 = [p.node() for p in wctx.parents()]
799 p1, p2 = [p.node() for p in wctx.parents()]
821 c1 = self.changelog.read(p1)
800 c1 = self.changelog.read(p1)
822 c2 = self.changelog.read(p2)
801 c2 = self.changelog.read(p2)
823 m1 = self.manifest.read(c1[0]).copy()
802 m1 = self.manifest.read(c1[0]).copy()
824 m2 = self.manifest.read(c2[0])
803 m2 = self.manifest.read(c2[0])
825
804
826 if use_dirstate:
805 if use_dirstate:
827 oldname = c1[5].get("branch") # stored in UTF-8
806 oldname = c1[5].get("branch") # stored in UTF-8
828 if (not commit and not remove and not force and p2 == nullid
807 if (not commit and not remove and not force and p2 == nullid
829 and branchname == oldname):
808 and branchname == oldname):
830 self.ui.status(_("nothing changed\n"))
809 self.ui.status(_("nothing changed\n"))
831 return None
810 return None
832
811
833 xp1 = hex(p1)
812 xp1 = hex(p1)
834 if p2 == nullid: xp2 = ''
813 if p2 == nullid: xp2 = ''
835 else: xp2 = hex(p2)
814 else: xp2 = hex(p2)
836
815
837 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
816 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
838
817
839 tr = self.transaction()
818 tr = self.transaction()
840 trp = weakref.proxy(tr)
819 trp = weakref.proxy(tr)
841
820
842 # check in files
821 # check in files
843 new = {}
822 new = {}
844 changed = []
823 changed = []
845 linkrev = len(self)
824 linkrev = len(self)
846 for f in commit:
825 for f in commit:
847 self.ui.note(f + "\n")
826 self.ui.note(f + "\n")
848 try:
827 try:
849 fctx = wctx.filectx(f)
828 fctx = wctx.filectx(f)
850 newflags = fctx.flags()
829 newflags = fctx.flags()
851 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
830 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
852 if ((not changed or changed[-1] != f) and
831 if ((not changed or changed[-1] != f) and
853 m2.get(f) != new[f]):
832 m2.get(f) != new[f]):
854 # mention the file in the changelog if some
833 # mention the file in the changelog if some
855 # flag changed, even if there was no content
834 # flag changed, even if there was no content
856 # change.
835 # change.
857 if m1.flags(f) != newflags:
836 if m1.flags(f) != newflags:
858 changed.append(f)
837 changed.append(f)
859 m1.set(f, newflags)
838 m1.set(f, newflags)
860 if use_dirstate:
839 if use_dirstate:
861 self.dirstate.normal(f)
840 self.dirstate.normal(f)
862
841
863 except (OSError, IOError):
842 except (OSError, IOError):
864 if use_dirstate:
843 if use_dirstate:
865 self.ui.warn(_("trouble committing %s!\n") % f)
844 self.ui.warn(_("trouble committing %s!\n") % f)
866 raise
845 raise
867 else:
846 else:
868 remove.append(f)
847 remove.append(f)
869
848
870 # update manifest
849 # update manifest
871 m1.update(new)
850 m1.update(new)
872 removed = []
851 removed = []
873
852
874 for f in util.sort(remove):
853 for f in util.sort(remove):
875 if f in m1:
854 if f in m1:
876 del m1[f]
855 del m1[f]
877 removed.append(f)
856 removed.append(f)
878 elif f in m2:
857 elif f in m2:
879 removed.append(f)
858 removed.append(f)
880 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
859 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
881 (new, removed))
860 (new, removed))
882
861
883 # add changeset
862 # add changeset
884 if (not empty_ok and not text) or force_editor:
863 if (not empty_ok and not text) or force_editor:
885 edittext = []
864 edittext = []
886 if text:
865 if text:
887 edittext.append(text)
866 edittext.append(text)
888 edittext.append("")
867 edittext.append("")
889 edittext.append(_("HG: Enter commit message."
868 edittext.append(_("HG: Enter commit message."
890 " Lines beginning with 'HG:' are removed."))
869 " Lines beginning with 'HG:' are removed."))
891 edittext.append("HG: --")
870 edittext.append("HG: --")
892 edittext.append("HG: user: %s" % user)
871 edittext.append("HG: user: %s" % user)
893 if p2 != nullid:
872 if p2 != nullid:
894 edittext.append("HG: branch merge")
873 edittext.append("HG: branch merge")
895 if branchname:
874 if branchname:
896 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
875 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
897 edittext.extend(["HG: changed %s" % f for f in changed])
876 edittext.extend(["HG: changed %s" % f for f in changed])
898 edittext.extend(["HG: removed %s" % f for f in removed])
877 edittext.extend(["HG: removed %s" % f for f in removed])
899 if not changed and not remove:
878 if not changed and not remove:
900 edittext.append("HG: no files changed")
879 edittext.append("HG: no files changed")
901 edittext.append("")
880 edittext.append("")
902 # run editor in the repository root
881 # run editor in the repository root
903 olddir = os.getcwd()
882 olddir = os.getcwd()
904 os.chdir(self.root)
883 os.chdir(self.root)
905 text = self.ui.edit("\n".join(edittext), user)
884 text = self.ui.edit("\n".join(edittext), user)
906 os.chdir(olddir)
885 os.chdir(olddir)
907
886
908 lines = [line.rstrip() for line in text.rstrip().splitlines()]
887 lines = [line.rstrip() for line in text.rstrip().splitlines()]
909 while lines and not lines[0]:
888 while lines and not lines[0]:
910 del lines[0]
889 del lines[0]
911 if not lines and use_dirstate:
890 if not lines and use_dirstate:
912 raise util.Abort(_("empty commit message"))
891 raise util.Abort(_("empty commit message"))
913 text = '\n'.join(lines)
892 text = '\n'.join(lines)
914
893
915 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
894 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
916 user, wctx.date(), extra)
895 user, wctx.date(), extra)
917 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
896 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
918 parent2=xp2)
897 parent2=xp2)
919 tr.close()
898 tr.close()
920
899
921 if self.branchcache:
900 if self.branchcache:
922 self.branchtags()
901 self.branchtags()
923
902
924 if use_dirstate or update_dirstate:
903 if use_dirstate or update_dirstate:
925 self.dirstate.setparents(n)
904 self.dirstate.setparents(n)
926 if use_dirstate:
905 if use_dirstate:
927 for f in removed:
906 for f in removed:
928 self.dirstate.forget(f)
907 self.dirstate.forget(f)
929 valid = 1 # our dirstate updates are complete
908 valid = 1 # our dirstate updates are complete
930
909
931 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
910 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
932 return n
911 return n
933 finally:
912 finally:
934 if not valid: # don't save our updated dirstate
913 if not valid: # don't save our updated dirstate
935 self.dirstate.invalidate()
914 self.dirstate.invalidate()
936 del tr
915 del tr
937
916
938 def walk(self, match, node=None):
917 def walk(self, match, node=None):
939 '''
918 '''
940 walk recursively through the directory tree or a given
919 walk recursively through the directory tree or a given
941 changeset, finding all files matched by the match
920 changeset, finding all files matched by the match
942 function
921 function
943 '''
922 '''
944 return self[node].walk(match)
923 return self[node].walk(match)
945
924
946 def status(self, node1='.', node2=None, match=None,
925 def status(self, node1='.', node2=None, match=None,
947 ignored=False, clean=False, unknown=False):
926 ignored=False, clean=False, unknown=False):
948 """return status of files between two nodes or node and working directory
927 """return status of files between two nodes or node and working directory
949
928
950 If node1 is None, use the first dirstate parent instead.
929 If node1 is None, use the first dirstate parent instead.
951 If node2 is None, compare node1 with working directory.
930 If node2 is None, compare node1 with working directory.
952 """
931 """
953
932
954 def mfmatches(ctx):
933 def mfmatches(ctx):
955 mf = ctx.manifest().copy()
934 mf = ctx.manifest().copy()
956 for fn in mf.keys():
935 for fn in mf.keys():
957 if not match(fn):
936 if not match(fn):
958 del mf[fn]
937 del mf[fn]
959 return mf
938 return mf
960
939
961 ctx1 = self[node1]
940 ctx1 = self[node1]
962 ctx2 = self[node2]
941 ctx2 = self[node2]
963 working = ctx2 == self[None]
942 working = ctx2 == self[None]
964 parentworking = working and ctx1 == self['.']
943 parentworking = working and ctx1 == self['.']
965 match = match or match_.always(self.root, self.getcwd())
944 match = match or match_.always(self.root, self.getcwd())
966 listignored, listclean, listunknown = ignored, clean, unknown
945 listignored, listclean, listunknown = ignored, clean, unknown
967
946
968 if working: # we need to scan the working dir
947 if working: # we need to scan the working dir
969 s = self.dirstate.status(match, listignored, listclean, listunknown)
948 s = self.dirstate.status(match, listignored, listclean, listunknown)
970 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
949 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
971
950
972 # check for any possibly clean files
951 # check for any possibly clean files
973 if parentworking and cmp:
952 if parentworking and cmp:
974 fixup = []
953 fixup = []
975 # do a full compare of any files that might have changed
954 # do a full compare of any files that might have changed
976 for f in cmp:
955 for f in cmp:
977 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
956 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
978 or ctx1[f].cmp(ctx2[f].data())):
957 or ctx1[f].cmp(ctx2[f].data())):
979 modified.append(f)
958 modified.append(f)
980 else:
959 else:
981 fixup.append(f)
960 fixup.append(f)
982
961
983 if listclean:
962 if listclean:
984 clean += fixup
963 clean += fixup
985
964
986 # update dirstate for files that are actually clean
965 # update dirstate for files that are actually clean
987 if fixup:
966 if fixup:
988 wlock = None
967 wlock = None
989 try:
968 try:
990 try:
969 try:
991 wlock = self.wlock(False)
970 wlock = self.wlock(False)
992 for f in fixup:
971 for f in fixup:
993 self.dirstate.normal(f)
972 self.dirstate.normal(f)
994 except lock.LockException:
973 except lock.LockException:
995 pass
974 pass
996 finally:
975 finally:
997 del wlock
976 del wlock
998
977
999 if not parentworking:
978 if not parentworking:
1000 mf1 = mfmatches(ctx1)
979 mf1 = mfmatches(ctx1)
1001 if working:
980 if working:
1002 # we are comparing working dir against non-parent
981 # we are comparing working dir against non-parent
1003 # generate a pseudo-manifest for the working dir
982 # generate a pseudo-manifest for the working dir
1004 mf2 = mfmatches(self['.'])
983 mf2 = mfmatches(self['.'])
1005 for f in cmp + modified + added:
984 for f in cmp + modified + added:
1006 mf2[f] = None
985 mf2[f] = None
1007 mf2.set(f, ctx2.flags(f))
986 mf2.set(f, ctx2.flags(f))
1008 for f in removed:
987 for f in removed:
1009 if f in mf2:
988 if f in mf2:
1010 del mf2[f]
989 del mf2[f]
1011 else:
990 else:
1012 # we are comparing two revisions
991 # we are comparing two revisions
1013 deleted, unknown, ignored = [], [], []
992 deleted, unknown, ignored = [], [], []
1014 mf2 = mfmatches(ctx2)
993 mf2 = mfmatches(ctx2)
1015
994
1016 modified, added, clean = [], [], []
995 modified, added, clean = [], [], []
1017 for fn in mf2:
996 for fn in mf2:
1018 if fn in mf1:
997 if fn in mf1:
1019 if (mf1.flags(fn) != mf2.flags(fn) or
998 if (mf1.flags(fn) != mf2.flags(fn) or
1020 (mf1[fn] != mf2[fn] and
999 (mf1[fn] != mf2[fn] and
1021 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1000 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1022 modified.append(fn)
1001 modified.append(fn)
1023 elif listclean:
1002 elif listclean:
1024 clean.append(fn)
1003 clean.append(fn)
1025 del mf1[fn]
1004 del mf1[fn]
1026 else:
1005 else:
1027 added.append(fn)
1006 added.append(fn)
1028 removed = mf1.keys()
1007 removed = mf1.keys()
1029
1008
1030 r = modified, added, removed, deleted, unknown, ignored, clean
1009 r = modified, added, removed, deleted, unknown, ignored, clean
1031 [l.sort() for l in r]
1010 [l.sort() for l in r]
1032 return r
1011 return r
1033
1012
1034 def add(self, list):
1013 def add(self, list):
1035 wlock = self.wlock()
1014 wlock = self.wlock()
1036 try:
1015 try:
1037 rejected = []
1016 rejected = []
1038 for f in list:
1017 for f in list:
1039 p = self.wjoin(f)
1018 p = self.wjoin(f)
1040 try:
1019 try:
1041 st = os.lstat(p)
1020 st = os.lstat(p)
1042 except:
1021 except:
1043 self.ui.warn(_("%s does not exist!\n") % f)
1022 self.ui.warn(_("%s does not exist!\n") % f)
1044 rejected.append(f)
1023 rejected.append(f)
1045 continue
1024 continue
1046 if st.st_size > 10000000:
1025 if st.st_size > 10000000:
1047 self.ui.warn(_("%s: files over 10MB may cause memory and"
1026 self.ui.warn(_("%s: files over 10MB may cause memory and"
1048 " performance problems\n"
1027 " performance problems\n"
1049 "(use 'hg revert %s' to unadd the file)\n")
1028 "(use 'hg revert %s' to unadd the file)\n")
1050 % (f, f))
1029 % (f, f))
1051 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1030 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1052 self.ui.warn(_("%s not added: only files and symlinks "
1031 self.ui.warn(_("%s not added: only files and symlinks "
1053 "supported currently\n") % f)
1032 "supported currently\n") % f)
1054 rejected.append(p)
1033 rejected.append(p)
1055 elif self.dirstate[f] in 'amn':
1034 elif self.dirstate[f] in 'amn':
1056 self.ui.warn(_("%s already tracked!\n") % f)
1035 self.ui.warn(_("%s already tracked!\n") % f)
1057 elif self.dirstate[f] == 'r':
1036 elif self.dirstate[f] == 'r':
1058 self.dirstate.normallookup(f)
1037 self.dirstate.normallookup(f)
1059 else:
1038 else:
1060 self.dirstate.add(f)
1039 self.dirstate.add(f)
1061 return rejected
1040 return rejected
1062 finally:
1041 finally:
1063 del wlock
1042 del wlock
1064
1043
1065 def forget(self, list):
1044 def forget(self, list):
1066 wlock = self.wlock()
1045 wlock = self.wlock()
1067 try:
1046 try:
1068 for f in list:
1047 for f in list:
1069 if self.dirstate[f] != 'a':
1048 if self.dirstate[f] != 'a':
1070 self.ui.warn(_("%s not added!\n") % f)
1049 self.ui.warn(_("%s not added!\n") % f)
1071 else:
1050 else:
1072 self.dirstate.forget(f)
1051 self.dirstate.forget(f)
1073 finally:
1052 finally:
1074 del wlock
1053 del wlock
1075
1054
1076 def remove(self, list, unlink=False):
1055 def remove(self, list, unlink=False):
1077 wlock = None
1056 wlock = None
1078 try:
1057 try:
1079 if unlink:
1058 if unlink:
1080 for f in list:
1059 for f in list:
1081 try:
1060 try:
1082 util.unlink(self.wjoin(f))
1061 util.unlink(self.wjoin(f))
1083 except OSError, inst:
1062 except OSError, inst:
1084 if inst.errno != errno.ENOENT:
1063 if inst.errno != errno.ENOENT:
1085 raise
1064 raise
1086 wlock = self.wlock()
1065 wlock = self.wlock()
1087 for f in list:
1066 for f in list:
1088 if unlink and os.path.exists(self.wjoin(f)):
1067 if unlink and os.path.exists(self.wjoin(f)):
1089 self.ui.warn(_("%s still exists!\n") % f)
1068 self.ui.warn(_("%s still exists!\n") % f)
1090 elif self.dirstate[f] == 'a':
1069 elif self.dirstate[f] == 'a':
1091 self.dirstate.forget(f)
1070 self.dirstate.forget(f)
1092 elif f not in self.dirstate:
1071 elif f not in self.dirstate:
1093 self.ui.warn(_("%s not tracked!\n") % f)
1072 self.ui.warn(_("%s not tracked!\n") % f)
1094 else:
1073 else:
1095 self.dirstate.remove(f)
1074 self.dirstate.remove(f)
1096 finally:
1075 finally:
1097 del wlock
1076 del wlock
1098
1077
1099 def undelete(self, list):
1078 def undelete(self, list):
1100 wlock = None
1079 wlock = None
1101 try:
1080 try:
1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1081 manifests = [self.manifest.read(self.changelog.read(p)[0])
1103 for p in self.dirstate.parents() if p != nullid]
1082 for p in self.dirstate.parents() if p != nullid]
1104 wlock = self.wlock()
1083 wlock = self.wlock()
1105 for f in list:
1084 for f in list:
1106 if self.dirstate[f] != 'r':
1085 if self.dirstate[f] != 'r':
1107 self.ui.warn("%s not removed!\n" % f)
1086 self.ui.warn("%s not removed!\n" % f)
1108 else:
1087 else:
1109 m = f in manifests[0] and manifests[0] or manifests[1]
1088 m = f in manifests[0] and manifests[0] or manifests[1]
1110 t = self.file(f).read(m[f])
1089 t = self.file(f).read(m[f])
1111 self.wwrite(f, t, m.flags(f))
1090 self.wwrite(f, t, m.flags(f))
1112 self.dirstate.normal(f)
1091 self.dirstate.normal(f)
1113 finally:
1092 finally:
1114 del wlock
1093 del wlock
1115
1094
1116 def copy(self, source, dest):
1095 def copy(self, source, dest):
1117 wlock = None
1096 wlock = None
1118 try:
1097 try:
1119 p = self.wjoin(dest)
1098 p = self.wjoin(dest)
1120 if not (os.path.exists(p) or os.path.islink(p)):
1099 if not (os.path.exists(p) or os.path.islink(p)):
1121 self.ui.warn(_("%s does not exist!\n") % dest)
1100 self.ui.warn(_("%s does not exist!\n") % dest)
1122 elif not (os.path.isfile(p) or os.path.islink(p)):
1101 elif not (os.path.isfile(p) or os.path.islink(p)):
1123 self.ui.warn(_("copy failed: %s is not a file or a "
1102 self.ui.warn(_("copy failed: %s is not a file or a "
1124 "symbolic link\n") % dest)
1103 "symbolic link\n") % dest)
1125 else:
1104 else:
1126 wlock = self.wlock()
1105 wlock = self.wlock()
1127 if dest not in self.dirstate:
1106 if dest not in self.dirstate:
1128 self.dirstate.add(dest)
1107 self.dirstate.add(dest)
1129 self.dirstate.copy(source, dest)
1108 self.dirstate.copy(source, dest)
1130 finally:
1109 finally:
1131 del wlock
1110 del wlock
1132
1111
1133 def heads(self, start=None):
1112 def heads(self, start=None):
1134 heads = self.changelog.heads(start)
1113 heads = self.changelog.heads(start)
1135 # sort the output in rev descending order
1114 # sort the output in rev descending order
1136 heads = [(-self.changelog.rev(h), h) for h in heads]
1115 heads = [(-self.changelog.rev(h), h) for h in heads]
1137 return [n for (r, n) in util.sort(heads)]
1116 return [n for (r, n) in util.sort(heads)]
1138
1117
1139 def branchheads(self, branch=None, start=None):
1118 def branchheads(self, branch=None, start=None):
1140 if branch is None:
1119 if branch is None:
1141 branch = self[None].branch()
1120 branch = self[None].branch()
1142 branches = self.branchtags()
1121 branches = self.branchtags()
1143 if branch not in branches:
1122 if branch not in branches:
1144 return []
1123 return []
1145 # The basic algorithm is this:
1124 # The basic algorithm is this:
1146 #
1125 #
1147 # Start from the branch tip since there are no later revisions that can
1126 # Start from the branch tip since there are no later revisions that can
1148 # possibly be in this branch, and the tip is a guaranteed head.
1127 # possibly be in this branch, and the tip is a guaranteed head.
1149 #
1128 #
1150 # Remember the tip's parents as the first ancestors, since these by
1129 # Remember the tip's parents as the first ancestors, since these by
1151 # definition are not heads.
1130 # definition are not heads.
1152 #
1131 #
1153 # Step backwards from the brach tip through all the revisions. We are
1132 # Step backwards from the brach tip through all the revisions. We are
1154 # guaranteed by the rules of Mercurial that we will now be visiting the
1133 # guaranteed by the rules of Mercurial that we will now be visiting the
1155 # nodes in reverse topological order (children before parents).
1134 # nodes in reverse topological order (children before parents).
1156 #
1135 #
1157 # If a revision is one of the ancestors of a head then we can toss it
1136 # If a revision is one of the ancestors of a head then we can toss it
1158 # out of the ancestors set (we've already found it and won't be
1137 # out of the ancestors set (we've already found it and won't be
1159 # visiting it again) and put its parents in the ancestors set.
1138 # visiting it again) and put its parents in the ancestors set.
1160 #
1139 #
1161 # Otherwise, if a revision is in the branch it's another head, since it
1140 # Otherwise, if a revision is in the branch it's another head, since it
1162 # wasn't in the ancestor list of an existing head. So add it to the
1141 # wasn't in the ancestor list of an existing head. So add it to the
1163 # head list, and add its parents to the ancestor list.
1142 # head list, and add its parents to the ancestor list.
1164 #
1143 #
1165 # If it is not in the branch ignore it.
1144 # If it is not in the branch ignore it.
1166 #
1145 #
1167 # Once we have a list of heads, use nodesbetween to filter out all the
1146 # Once we have a list of heads, use nodesbetween to filter out all the
1168 # heads that cannot be reached from startrev. There may be a more
1147 # heads that cannot be reached from startrev. There may be a more
1169 # efficient way to do this as part of the previous algorithm.
1148 # efficient way to do this as part of the previous algorithm.
1170
1149
1171 set = util.set
1150 set = util.set
1172 heads = [self.changelog.rev(branches[branch])]
1151 heads = [self.changelog.rev(branches[branch])]
1173 # Don't care if ancestors contains nullrev or not.
1152 # Don't care if ancestors contains nullrev or not.
1174 ancestors = set(self.changelog.parentrevs(heads[0]))
1153 ancestors = set(self.changelog.parentrevs(heads[0]))
1175 for rev in xrange(heads[0] - 1, nullrev, -1):
1154 for rev in xrange(heads[0] - 1, nullrev, -1):
1176 if rev in ancestors:
1155 if rev in ancestors:
1177 ancestors.update(self.changelog.parentrevs(rev))
1156 ancestors.update(self.changelog.parentrevs(rev))
1178 ancestors.remove(rev)
1157 ancestors.remove(rev)
1179 elif self[rev].branch() == branch:
1158 elif self[rev].branch() == branch:
1180 heads.append(rev)
1159 heads.append(rev)
1181 ancestors.update(self.changelog.parentrevs(rev))
1160 ancestors.update(self.changelog.parentrevs(rev))
1182 heads = [self.changelog.node(rev) for rev in heads]
1161 heads = [self.changelog.node(rev) for rev in heads]
1183 if start is not None:
1162 if start is not None:
1184 heads = self.changelog.nodesbetween([start], heads)[2]
1163 heads = self.changelog.nodesbetween([start], heads)[2]
1185 return heads
1164 return heads
1186
1165
1187 def branches(self, nodes):
1166 def branches(self, nodes):
1188 if not nodes:
1167 if not nodes:
1189 nodes = [self.changelog.tip()]
1168 nodes = [self.changelog.tip()]
1190 b = []
1169 b = []
1191 for n in nodes:
1170 for n in nodes:
1192 t = n
1171 t = n
1193 while 1:
1172 while 1:
1194 p = self.changelog.parents(n)
1173 p = self.changelog.parents(n)
1195 if p[1] != nullid or p[0] == nullid:
1174 if p[1] != nullid or p[0] == nullid:
1196 b.append((t, n, p[0], p[1]))
1175 b.append((t, n, p[0], p[1]))
1197 break
1176 break
1198 n = p[0]
1177 n = p[0]
1199 return b
1178 return b
1200
1179
1201 def between(self, pairs):
1180 def between(self, pairs):
1202 r = []
1181 r = []
1203
1182
1204 for top, bottom in pairs:
1183 for top, bottom in pairs:
1205 n, l, i = top, [], 0
1184 n, l, i = top, [], 0
1206 f = 1
1185 f = 1
1207
1186
1208 while n != bottom:
1187 while n != bottom:
1209 p = self.changelog.parents(n)[0]
1188 p = self.changelog.parents(n)[0]
1210 if i == f:
1189 if i == f:
1211 l.append(n)
1190 l.append(n)
1212 f = f * 2
1191 f = f * 2
1213 n = p
1192 n = p
1214 i += 1
1193 i += 1
1215
1194
1216 r.append(l)
1195 r.append(l)
1217
1196
1218 return r
1197 return r
1219
1198
1220 def findincoming(self, remote, base=None, heads=None, force=False):
1199 def findincoming(self, remote, base=None, heads=None, force=False):
1221 """Return list of roots of the subsets of missing nodes from remote
1200 """Return list of roots of the subsets of missing nodes from remote
1222
1201
1223 If base dict is specified, assume that these nodes and their parents
1202 If base dict is specified, assume that these nodes and their parents
1224 exist on the remote side and that no child of a node of base exists
1203 exist on the remote side and that no child of a node of base exists
1225 in both remote and self.
1204 in both remote and self.
1226 Furthermore base will be updated to include the nodes that exists
1205 Furthermore base will be updated to include the nodes that exists
1227 in self and remote but no children exists in self and remote.
1206 in self and remote but no children exists in self and remote.
1228 If a list of heads is specified, return only nodes which are heads
1207 If a list of heads is specified, return only nodes which are heads
1229 or ancestors of these heads.
1208 or ancestors of these heads.
1230
1209
1231 All the ancestors of base are in self and in remote.
1210 All the ancestors of base are in self and in remote.
1232 All the descendants of the list returned are missing in self.
1211 All the descendants of the list returned are missing in self.
1233 (and so we know that the rest of the nodes are missing in remote, see
1212 (and so we know that the rest of the nodes are missing in remote, see
1234 outgoing)
1213 outgoing)
1235 """
1214 """
1236 m = self.changelog.nodemap
1215 m = self.changelog.nodemap
1237 search = []
1216 search = []
1238 fetch = {}
1217 fetch = {}
1239 seen = {}
1218 seen = {}
1240 seenbranch = {}
1219 seenbranch = {}
1241 if base == None:
1220 if base == None:
1242 base = {}
1221 base = {}
1243
1222
1244 if not heads:
1223 if not heads:
1245 heads = remote.heads()
1224 heads = remote.heads()
1246
1225
1247 if self.changelog.tip() == nullid:
1226 if self.changelog.tip() == nullid:
1248 base[nullid] = 1
1227 base[nullid] = 1
1249 if heads != [nullid]:
1228 if heads != [nullid]:
1250 return [nullid]
1229 return [nullid]
1251 return []
1230 return []
1252
1231
1253 # assume we're closer to the tip than the root
1232 # assume we're closer to the tip than the root
1254 # and start by examining the heads
1233 # and start by examining the heads
1255 self.ui.status(_("searching for changes\n"))
1234 self.ui.status(_("searching for changes\n"))
1256
1235
1257 unknown = []
1236 unknown = []
1258 for h in heads:
1237 for h in heads:
1259 if h not in m:
1238 if h not in m:
1260 unknown.append(h)
1239 unknown.append(h)
1261 else:
1240 else:
1262 base[h] = 1
1241 base[h] = 1
1263
1242
1264 if not unknown:
1243 if not unknown:
1265 return []
1244 return []
1266
1245
1267 req = dict.fromkeys(unknown)
1246 req = dict.fromkeys(unknown)
1268 reqcnt = 0
1247 reqcnt = 0
1269
1248
1270 # search through remote branches
1249 # search through remote branches
1271 # a 'branch' here is a linear segment of history, with four parts:
1250 # a 'branch' here is a linear segment of history, with four parts:
1272 # head, root, first parent, second parent
1251 # head, root, first parent, second parent
1273 # (a branch always has two parents (or none) by definition)
1252 # (a branch always has two parents (or none) by definition)
1274 unknown = remote.branches(unknown)
1253 unknown = remote.branches(unknown)
1275 while unknown:
1254 while unknown:
1276 r = []
1255 r = []
1277 while unknown:
1256 while unknown:
1278 n = unknown.pop(0)
1257 n = unknown.pop(0)
1279 if n[0] in seen:
1258 if n[0] in seen:
1280 continue
1259 continue
1281
1260
1282 self.ui.debug(_("examining %s:%s\n")
1261 self.ui.debug(_("examining %s:%s\n")
1283 % (short(n[0]), short(n[1])))
1262 % (short(n[0]), short(n[1])))
1284 if n[0] == nullid: # found the end of the branch
1263 if n[0] == nullid: # found the end of the branch
1285 pass
1264 pass
1286 elif n in seenbranch:
1265 elif n in seenbranch:
1287 self.ui.debug(_("branch already found\n"))
1266 self.ui.debug(_("branch already found\n"))
1288 continue
1267 continue
1289 elif n[1] and n[1] in m: # do we know the base?
1268 elif n[1] and n[1] in m: # do we know the base?
1290 self.ui.debug(_("found incomplete branch %s:%s\n")
1269 self.ui.debug(_("found incomplete branch %s:%s\n")
1291 % (short(n[0]), short(n[1])))
1270 % (short(n[0]), short(n[1])))
1292 search.append(n) # schedule branch range for scanning
1271 search.append(n) # schedule branch range for scanning
1293 seenbranch[n] = 1
1272 seenbranch[n] = 1
1294 else:
1273 else:
1295 if n[1] not in seen and n[1] not in fetch:
1274 if n[1] not in seen and n[1] not in fetch:
1296 if n[2] in m and n[3] in m:
1275 if n[2] in m and n[3] in m:
1297 self.ui.debug(_("found new changeset %s\n") %
1276 self.ui.debug(_("found new changeset %s\n") %
1298 short(n[1]))
1277 short(n[1]))
1299 fetch[n[1]] = 1 # earliest unknown
1278 fetch[n[1]] = 1 # earliest unknown
1300 for p in n[2:4]:
1279 for p in n[2:4]:
1301 if p in m:
1280 if p in m:
1302 base[p] = 1 # latest known
1281 base[p] = 1 # latest known
1303
1282
1304 for p in n[2:4]:
1283 for p in n[2:4]:
1305 if p not in req and p not in m:
1284 if p not in req and p not in m:
1306 r.append(p)
1285 r.append(p)
1307 req[p] = 1
1286 req[p] = 1
1308 seen[n[0]] = 1
1287 seen[n[0]] = 1
1309
1288
1310 if r:
1289 if r:
1311 reqcnt += 1
1290 reqcnt += 1
1312 self.ui.debug(_("request %d: %s\n") %
1291 self.ui.debug(_("request %d: %s\n") %
1313 (reqcnt, " ".join(map(short, r))))
1292 (reqcnt, " ".join(map(short, r))))
1314 for p in xrange(0, len(r), 10):
1293 for p in xrange(0, len(r), 10):
1315 for b in remote.branches(r[p:p+10]):
1294 for b in remote.branches(r[p:p+10]):
1316 self.ui.debug(_("received %s:%s\n") %
1295 self.ui.debug(_("received %s:%s\n") %
1317 (short(b[0]), short(b[1])))
1296 (short(b[0]), short(b[1])))
1318 unknown.append(b)
1297 unknown.append(b)
1319
1298
1320 # do binary search on the branches we found
1299 # do binary search on the branches we found
1321 while search:
1300 while search:
1322 n = search.pop(0)
1301 n = search.pop(0)
1323 reqcnt += 1
1302 reqcnt += 1
1324 l = remote.between([(n[0], n[1])])[0]
1303 l = remote.between([(n[0], n[1])])[0]
1325 l.append(n[1])
1304 l.append(n[1])
1326 p = n[0]
1305 p = n[0]
1327 f = 1
1306 f = 1
1328 for i in l:
1307 for i in l:
1329 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1308 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1330 if i in m:
1309 if i in m:
1331 if f <= 2:
1310 if f <= 2:
1332 self.ui.debug(_("found new branch changeset %s\n") %
1311 self.ui.debug(_("found new branch changeset %s\n") %
1333 short(p))
1312 short(p))
1334 fetch[p] = 1
1313 fetch[p] = 1
1335 base[i] = 1
1314 base[i] = 1
1336 else:
1315 else:
1337 self.ui.debug(_("narrowed branch search to %s:%s\n")
1316 self.ui.debug(_("narrowed branch search to %s:%s\n")
1338 % (short(p), short(i)))
1317 % (short(p), short(i)))
1339 search.append((p, i))
1318 search.append((p, i))
1340 break
1319 break
1341 p, f = i, f * 2
1320 p, f = i, f * 2
1342
1321
1343 # sanity check our fetch list
1322 # sanity check our fetch list
1344 for f in fetch.keys():
1323 for f in fetch.keys():
1345 if f in m:
1324 if f in m:
1346 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1325 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1347
1326
1348 if base.keys() == [nullid]:
1327 if base.keys() == [nullid]:
1349 if force:
1328 if force:
1350 self.ui.warn(_("warning: repository is unrelated\n"))
1329 self.ui.warn(_("warning: repository is unrelated\n"))
1351 else:
1330 else:
1352 raise util.Abort(_("repository is unrelated"))
1331 raise util.Abort(_("repository is unrelated"))
1353
1332
1354 self.ui.debug(_("found new changesets starting at ") +
1333 self.ui.debug(_("found new changesets starting at ") +
1355 " ".join([short(f) for f in fetch]) + "\n")
1334 " ".join([short(f) for f in fetch]) + "\n")
1356
1335
1357 self.ui.debug(_("%d total queries\n") % reqcnt)
1336 self.ui.debug(_("%d total queries\n") % reqcnt)
1358
1337
1359 return fetch.keys()
1338 return fetch.keys()
1360
1339
1361 def findoutgoing(self, remote, base=None, heads=None, force=False):
1340 def findoutgoing(self, remote, base=None, heads=None, force=False):
1362 """Return list of nodes that are roots of subsets not in remote
1341 """Return list of nodes that are roots of subsets not in remote
1363
1342
1364 If base dict is specified, assume that these nodes and their parents
1343 If base dict is specified, assume that these nodes and their parents
1365 exist on the remote side.
1344 exist on the remote side.
1366 If a list of heads is specified, return only nodes which are heads
1345 If a list of heads is specified, return only nodes which are heads
1367 or ancestors of these heads, and return a second element which
1346 or ancestors of these heads, and return a second element which
1368 contains all remote heads which get new children.
1347 contains all remote heads which get new children.
1369 """
1348 """
1370 if base == None:
1349 if base == None:
1371 base = {}
1350 base = {}
1372 self.findincoming(remote, base, heads, force=force)
1351 self.findincoming(remote, base, heads, force=force)
1373
1352
1374 self.ui.debug(_("common changesets up to ")
1353 self.ui.debug(_("common changesets up to ")
1375 + " ".join(map(short, base.keys())) + "\n")
1354 + " ".join(map(short, base.keys())) + "\n")
1376
1355
1377 remain = dict.fromkeys(self.changelog.nodemap)
1356 remain = dict.fromkeys(self.changelog.nodemap)
1378
1357
1379 # prune everything remote has from the tree
1358 # prune everything remote has from the tree
1380 del remain[nullid]
1359 del remain[nullid]
1381 remove = base.keys()
1360 remove = base.keys()
1382 while remove:
1361 while remove:
1383 n = remove.pop(0)
1362 n = remove.pop(0)
1384 if n in remain:
1363 if n in remain:
1385 del remain[n]
1364 del remain[n]
1386 for p in self.changelog.parents(n):
1365 for p in self.changelog.parents(n):
1387 remove.append(p)
1366 remove.append(p)
1388
1367
1389 # find every node whose parents have been pruned
1368 # find every node whose parents have been pruned
1390 subset = []
1369 subset = []
1391 # find every remote head that will get new children
1370 # find every remote head that will get new children
1392 updated_heads = {}
1371 updated_heads = {}
1393 for n in remain:
1372 for n in remain:
1394 p1, p2 = self.changelog.parents(n)
1373 p1, p2 = self.changelog.parents(n)
1395 if p1 not in remain and p2 not in remain:
1374 if p1 not in remain and p2 not in remain:
1396 subset.append(n)
1375 subset.append(n)
1397 if heads:
1376 if heads:
1398 if p1 in heads:
1377 if p1 in heads:
1399 updated_heads[p1] = True
1378 updated_heads[p1] = True
1400 if p2 in heads:
1379 if p2 in heads:
1401 updated_heads[p2] = True
1380 updated_heads[p2] = True
1402
1381
1403 # this is the set of all roots we have to push
1382 # this is the set of all roots we have to push
1404 if heads:
1383 if heads:
1405 return subset, updated_heads.keys()
1384 return subset, updated_heads.keys()
1406 else:
1385 else:
1407 return subset
1386 return subset
1408
1387
1409 def pull(self, remote, heads=None, force=False):
1388 def pull(self, remote, heads=None, force=False):
1410 lock = self.lock()
1389 lock = self.lock()
1411 try:
1390 try:
1412 fetch = self.findincoming(remote, heads=heads, force=force)
1391 fetch = self.findincoming(remote, heads=heads, force=force)
1413 if fetch == [nullid]:
1392 if fetch == [nullid]:
1414 self.ui.status(_("requesting all changes\n"))
1393 self.ui.status(_("requesting all changes\n"))
1415
1394
1416 if not fetch:
1395 if not fetch:
1417 self.ui.status(_("no changes found\n"))
1396 self.ui.status(_("no changes found\n"))
1418 return 0
1397 return 0
1419
1398
1420 if heads is None:
1399 if heads is None:
1421 cg = remote.changegroup(fetch, 'pull')
1400 cg = remote.changegroup(fetch, 'pull')
1422 else:
1401 else:
1423 if 'changegroupsubset' not in remote.capabilities:
1402 if 'changegroupsubset' not in remote.capabilities:
1424 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1403 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1425 cg = remote.changegroupsubset(fetch, heads, 'pull')
1404 cg = remote.changegroupsubset(fetch, heads, 'pull')
1426 return self.addchangegroup(cg, 'pull', remote.url())
1405 return self.addchangegroup(cg, 'pull', remote.url())
1427 finally:
1406 finally:
1428 del lock
1407 del lock
1429
1408
1430 def push(self, remote, force=False, revs=None):
1409 def push(self, remote, force=False, revs=None):
1431 # there are two ways to push to remote repo:
1410 # there are two ways to push to remote repo:
1432 #
1411 #
1433 # addchangegroup assumes local user can lock remote
1412 # addchangegroup assumes local user can lock remote
1434 # repo (local filesystem, old ssh servers).
1413 # repo (local filesystem, old ssh servers).
1435 #
1414 #
1436 # unbundle assumes local user cannot lock remote repo (new ssh
1415 # unbundle assumes local user cannot lock remote repo (new ssh
1437 # servers, http servers).
1416 # servers, http servers).
1438
1417
1439 if remote.capable('unbundle'):
1418 if remote.capable('unbundle'):
1440 return self.push_unbundle(remote, force, revs)
1419 return self.push_unbundle(remote, force, revs)
1441 return self.push_addchangegroup(remote, force, revs)
1420 return self.push_addchangegroup(remote, force, revs)
1442
1421
1443 def prepush(self, remote, force, revs):
1422 def prepush(self, remote, force, revs):
1444 base = {}
1423 base = {}
1445 remote_heads = remote.heads()
1424 remote_heads = remote.heads()
1446 inc = self.findincoming(remote, base, remote_heads, force=force)
1425 inc = self.findincoming(remote, base, remote_heads, force=force)
1447
1426
1448 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1427 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1449 if revs is not None:
1428 if revs is not None:
1450 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1429 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1451 else:
1430 else:
1452 bases, heads = update, self.changelog.heads()
1431 bases, heads = update, self.changelog.heads()
1453
1432
1454 if not bases:
1433 if not bases:
1455 self.ui.status(_("no changes found\n"))
1434 self.ui.status(_("no changes found\n"))
1456 return None, 1
1435 return None, 1
1457 elif not force:
1436 elif not force:
1458 # check if we're creating new remote heads
1437 # check if we're creating new remote heads
1459 # to be a remote head after push, node must be either
1438 # to be a remote head after push, node must be either
1460 # - unknown locally
1439 # - unknown locally
1461 # - a local outgoing head descended from update
1440 # - a local outgoing head descended from update
1462 # - a remote head that's known locally and not
1441 # - a remote head that's known locally and not
1463 # ancestral to an outgoing head
1442 # ancestral to an outgoing head
1464
1443
1465 warn = 0
1444 warn = 0
1466
1445
1467 if remote_heads == [nullid]:
1446 if remote_heads == [nullid]:
1468 warn = 0
1447 warn = 0
1469 elif not revs and len(heads) > len(remote_heads):
1448 elif not revs and len(heads) > len(remote_heads):
1470 warn = 1
1449 warn = 1
1471 else:
1450 else:
1472 newheads = list(heads)
1451 newheads = list(heads)
1473 for r in remote_heads:
1452 for r in remote_heads:
1474 if r in self.changelog.nodemap:
1453 if r in self.changelog.nodemap:
1475 desc = self.changelog.heads(r, heads)
1454 desc = self.changelog.heads(r, heads)
1476 l = [h for h in heads if h in desc]
1455 l = [h for h in heads if h in desc]
1477 if not l:
1456 if not l:
1478 newheads.append(r)
1457 newheads.append(r)
1479 else:
1458 else:
1480 newheads.append(r)
1459 newheads.append(r)
1481 if len(newheads) > len(remote_heads):
1460 if len(newheads) > len(remote_heads):
1482 warn = 1
1461 warn = 1
1483
1462
1484 if warn:
1463 if warn:
1485 self.ui.warn(_("abort: push creates new remote heads!\n"))
1464 self.ui.warn(_("abort: push creates new remote heads!\n"))
1486 self.ui.status(_("(did you forget to merge?"
1465 self.ui.status(_("(did you forget to merge?"
1487 " use push -f to force)\n"))
1466 " use push -f to force)\n"))
1488 return None, 0
1467 return None, 0
1489 elif inc:
1468 elif inc:
1490 self.ui.warn(_("note: unsynced remote changes!\n"))
1469 self.ui.warn(_("note: unsynced remote changes!\n"))
1491
1470
1492
1471
1493 if revs is None:
1472 if revs is None:
1494 cg = self.changegroup(update, 'push')
1473 cg = self.changegroup(update, 'push')
1495 else:
1474 else:
1496 cg = self.changegroupsubset(update, revs, 'push')
1475 cg = self.changegroupsubset(update, revs, 'push')
1497 return cg, remote_heads
1476 return cg, remote_heads
1498
1477
1499 def push_addchangegroup(self, remote, force, revs):
1478 def push_addchangegroup(self, remote, force, revs):
1500 lock = remote.lock()
1479 lock = remote.lock()
1501 try:
1480 try:
1502 ret = self.prepush(remote, force, revs)
1481 ret = self.prepush(remote, force, revs)
1503 if ret[0] is not None:
1482 if ret[0] is not None:
1504 cg, remote_heads = ret
1483 cg, remote_heads = ret
1505 return remote.addchangegroup(cg, 'push', self.url())
1484 return remote.addchangegroup(cg, 'push', self.url())
1506 return ret[1]
1485 return ret[1]
1507 finally:
1486 finally:
1508 del lock
1487 del lock
1509
1488
1510 def push_unbundle(self, remote, force, revs):
1489 def push_unbundle(self, remote, force, revs):
1511 # local repo finds heads on server, finds out what revs it
1490 # local repo finds heads on server, finds out what revs it
1512 # must push. once revs transferred, if server finds it has
1491 # must push. once revs transferred, if server finds it has
1513 # different heads (someone else won commit/push race), server
1492 # different heads (someone else won commit/push race), server
1514 # aborts.
1493 # aborts.
1515
1494
1516 ret = self.prepush(remote, force, revs)
1495 ret = self.prepush(remote, force, revs)
1517 if ret[0] is not None:
1496 if ret[0] is not None:
1518 cg, remote_heads = ret
1497 cg, remote_heads = ret
1519 if force: remote_heads = ['force']
1498 if force: remote_heads = ['force']
1520 return remote.unbundle(cg, remote_heads, 'push')
1499 return remote.unbundle(cg, remote_heads, 'push')
1521 return ret[1]
1500 return ret[1]
1522
1501
1523 def changegroupinfo(self, nodes, source):
1502 def changegroupinfo(self, nodes, source):
1524 if self.ui.verbose or source == 'bundle':
1503 if self.ui.verbose or source == 'bundle':
1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1504 self.ui.status(_("%d changesets found\n") % len(nodes))
1526 if self.ui.debugflag:
1505 if self.ui.debugflag:
1527 self.ui.debug(_("List of changesets:\n"))
1506 self.ui.debug(_("List of changesets:\n"))
1528 for node in nodes:
1507 for node in nodes:
1529 self.ui.debug("%s\n" % hex(node))
1508 self.ui.debug("%s\n" % hex(node))
1530
1509
1531 def changegroupsubset(self, bases, heads, source, extranodes=None):
1510 def changegroupsubset(self, bases, heads, source, extranodes=None):
1532 """This function generates a changegroup consisting of all the nodes
1511 """This function generates a changegroup consisting of all the nodes
1533 that are descendents of any of the bases, and ancestors of any of
1512 that are descendents of any of the bases, and ancestors of any of
1534 the heads.
1513 the heads.
1535
1514
1536 It is fairly complex as determining which filenodes and which
1515 It is fairly complex as determining which filenodes and which
1537 manifest nodes need to be included for the changeset to be complete
1516 manifest nodes need to be included for the changeset to be complete
1538 is non-trivial.
1517 is non-trivial.
1539
1518
1540 Another wrinkle is doing the reverse, figuring out which changeset in
1519 Another wrinkle is doing the reverse, figuring out which changeset in
1541 the changegroup a particular filenode or manifestnode belongs to.
1520 the changegroup a particular filenode or manifestnode belongs to.
1542
1521
1543 The caller can specify some nodes that must be included in the
1522 The caller can specify some nodes that must be included in the
1544 changegroup using the extranodes argument. It should be a dict
1523 changegroup using the extranodes argument. It should be a dict
1545 where the keys are the filenames (or 1 for the manifest), and the
1524 where the keys are the filenames (or 1 for the manifest), and the
1546 values are lists of (node, linknode) tuples, where node is a wanted
1525 values are lists of (node, linknode) tuples, where node is a wanted
1547 node and linknode is the changelog node that should be transmitted as
1526 node and linknode is the changelog node that should be transmitted as
1548 the linkrev.
1527 the linkrev.
1549 """
1528 """
1550
1529
1551 self.hook('preoutgoing', throw=True, source=source)
1530 self.hook('preoutgoing', throw=True, source=source)
1552
1531
1553 # Set up some initial variables
1532 # Set up some initial variables
1554 # Make it easy to refer to self.changelog
1533 # Make it easy to refer to self.changelog
1555 cl = self.changelog
1534 cl = self.changelog
1556 # msng is short for missing - compute the list of changesets in this
1535 # msng is short for missing - compute the list of changesets in this
1557 # changegroup.
1536 # changegroup.
1558 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1537 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1559 self.changegroupinfo(msng_cl_lst, source)
1538 self.changegroupinfo(msng_cl_lst, source)
1560 # Some bases may turn out to be superfluous, and some heads may be
1539 # Some bases may turn out to be superfluous, and some heads may be
1561 # too. nodesbetween will return the minimal set of bases and heads
1540 # too. nodesbetween will return the minimal set of bases and heads
1562 # necessary to re-create the changegroup.
1541 # necessary to re-create the changegroup.
1563
1542
1564 # Known heads are the list of heads that it is assumed the recipient
1543 # Known heads are the list of heads that it is assumed the recipient
1565 # of this changegroup will know about.
1544 # of this changegroup will know about.
1566 knownheads = {}
1545 knownheads = {}
1567 # We assume that all parents of bases are known heads.
1546 # We assume that all parents of bases are known heads.
1568 for n in bases:
1547 for n in bases:
1569 for p in cl.parents(n):
1548 for p in cl.parents(n):
1570 if p != nullid:
1549 if p != nullid:
1571 knownheads[p] = 1
1550 knownheads[p] = 1
1572 knownheads = knownheads.keys()
1551 knownheads = knownheads.keys()
1573 if knownheads:
1552 if knownheads:
1574 # Now that we know what heads are known, we can compute which
1553 # Now that we know what heads are known, we can compute which
1575 # changesets are known. The recipient must know about all
1554 # changesets are known. The recipient must know about all
1576 # changesets required to reach the known heads from the null
1555 # changesets required to reach the known heads from the null
1577 # changeset.
1556 # changeset.
1578 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1557 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1579 junk = None
1558 junk = None
1580 # Transform the list into an ersatz set.
1559 # Transform the list into an ersatz set.
1581 has_cl_set = dict.fromkeys(has_cl_set)
1560 has_cl_set = dict.fromkeys(has_cl_set)
1582 else:
1561 else:
1583 # If there were no known heads, the recipient cannot be assumed to
1562 # If there were no known heads, the recipient cannot be assumed to
1584 # know about any changesets.
1563 # know about any changesets.
1585 has_cl_set = {}
1564 has_cl_set = {}
1586
1565
1587 # Make it easy to refer to self.manifest
1566 # Make it easy to refer to self.manifest
1588 mnfst = self.manifest
1567 mnfst = self.manifest
1589 # We don't know which manifests are missing yet
1568 # We don't know which manifests are missing yet
1590 msng_mnfst_set = {}
1569 msng_mnfst_set = {}
1591 # Nor do we know which filenodes are missing.
1570 # Nor do we know which filenodes are missing.
1592 msng_filenode_set = {}
1571 msng_filenode_set = {}
1593
1572
1594 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1573 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1595 junk = None
1574 junk = None
1596
1575
1597 # A changeset always belongs to itself, so the changenode lookup
1576 # A changeset always belongs to itself, so the changenode lookup
1598 # function for a changenode is identity.
1577 # function for a changenode is identity.
1599 def identity(x):
1578 def identity(x):
1600 return x
1579 return x
1601
1580
1602 # A function generating function. Sets up an environment for the
1581 # A function generating function. Sets up an environment for the
1603 # inner function.
1582 # inner function.
1604 def cmp_by_rev_func(revlog):
1583 def cmp_by_rev_func(revlog):
1605 # Compare two nodes by their revision number in the environment's
1584 # Compare two nodes by their revision number in the environment's
1606 # revision history. Since the revision number both represents the
1585 # revision history. Since the revision number both represents the
1607 # most efficient order to read the nodes in, and represents a
1586 # most efficient order to read the nodes in, and represents a
1608 # topological sorting of the nodes, this function is often useful.
1587 # topological sorting of the nodes, this function is often useful.
1609 def cmp_by_rev(a, b):
1588 def cmp_by_rev(a, b):
1610 return cmp(revlog.rev(a), revlog.rev(b))
1589 return cmp(revlog.rev(a), revlog.rev(b))
1611 return cmp_by_rev
1590 return cmp_by_rev
1612
1591
1613 # If we determine that a particular file or manifest node must be a
1592 # If we determine that a particular file or manifest node must be a
1614 # node that the recipient of the changegroup will already have, we can
1593 # node that the recipient of the changegroup will already have, we can
1615 # also assume the recipient will have all the parents. This function
1594 # also assume the recipient will have all the parents. This function
1616 # prunes them from the set of missing nodes.
1595 # prunes them from the set of missing nodes.
1617 def prune_parents(revlog, hasset, msngset):
1596 def prune_parents(revlog, hasset, msngset):
1618 haslst = hasset.keys()
1597 haslst = hasset.keys()
1619 haslst.sort(cmp_by_rev_func(revlog))
1598 haslst.sort(cmp_by_rev_func(revlog))
1620 for node in haslst:
1599 for node in haslst:
1621 parentlst = [p for p in revlog.parents(node) if p != nullid]
1600 parentlst = [p for p in revlog.parents(node) if p != nullid]
1622 while parentlst:
1601 while parentlst:
1623 n = parentlst.pop()
1602 n = parentlst.pop()
1624 if n not in hasset:
1603 if n not in hasset:
1625 hasset[n] = 1
1604 hasset[n] = 1
1626 p = [p for p in revlog.parents(n) if p != nullid]
1605 p = [p for p in revlog.parents(n) if p != nullid]
1627 parentlst.extend(p)
1606 parentlst.extend(p)
1628 for n in hasset:
1607 for n in hasset:
1629 msngset.pop(n, None)
1608 msngset.pop(n, None)
1630
1609
1631 # This is a function generating function used to set up an environment
1610 # This is a function generating function used to set up an environment
1632 # for the inner function to execute in.
1611 # for the inner function to execute in.
1633 def manifest_and_file_collector(changedfileset):
1612 def manifest_and_file_collector(changedfileset):
1634 # This is an information gathering function that gathers
1613 # This is an information gathering function that gathers
1635 # information from each changeset node that goes out as part of
1614 # information from each changeset node that goes out as part of
1636 # the changegroup. The information gathered is a list of which
1615 # the changegroup. The information gathered is a list of which
1637 # manifest nodes are potentially required (the recipient may
1616 # manifest nodes are potentially required (the recipient may
1638 # already have them) and total list of all files which were
1617 # already have them) and total list of all files which were
1639 # changed in any changeset in the changegroup.
1618 # changed in any changeset in the changegroup.
1640 #
1619 #
1641 # We also remember the first changenode we saw any manifest
1620 # We also remember the first changenode we saw any manifest
1642 # referenced by so we can later determine which changenode 'owns'
1621 # referenced by so we can later determine which changenode 'owns'
1643 # the manifest.
1622 # the manifest.
1644 def collect_manifests_and_files(clnode):
1623 def collect_manifests_and_files(clnode):
1645 c = cl.read(clnode)
1624 c = cl.read(clnode)
1646 for f in c[3]:
1625 for f in c[3]:
1647 # This is to make sure we only have one instance of each
1626 # This is to make sure we only have one instance of each
1648 # filename string for each filename.
1627 # filename string for each filename.
1649 changedfileset.setdefault(f, f)
1628 changedfileset.setdefault(f, f)
1650 msng_mnfst_set.setdefault(c[0], clnode)
1629 msng_mnfst_set.setdefault(c[0], clnode)
1651 return collect_manifests_and_files
1630 return collect_manifests_and_files
1652
1631
1653 # Figure out which manifest nodes (of the ones we think might be part
1632 # Figure out which manifest nodes (of the ones we think might be part
1654 # of the changegroup) the recipient must know about and remove them
1633 # of the changegroup) the recipient must know about and remove them
1655 # from the changegroup.
1634 # from the changegroup.
1656 def prune_manifests():
1635 def prune_manifests():
1657 has_mnfst_set = {}
1636 has_mnfst_set = {}
1658 for n in msng_mnfst_set:
1637 for n in msng_mnfst_set:
1659 # If a 'missing' manifest thinks it belongs to a changenode
1638 # If a 'missing' manifest thinks it belongs to a changenode
1660 # the recipient is assumed to have, obviously the recipient
1639 # the recipient is assumed to have, obviously the recipient
1661 # must have that manifest.
1640 # must have that manifest.
1662 linknode = cl.node(mnfst.linkrev(n))
1641 linknode = cl.node(mnfst.linkrev(n))
1663 if linknode in has_cl_set:
1642 if linknode in has_cl_set:
1664 has_mnfst_set[n] = 1
1643 has_mnfst_set[n] = 1
1665 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1644 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1666
1645
1667 # Use the information collected in collect_manifests_and_files to say
1646 # Use the information collected in collect_manifests_and_files to say
1668 # which changenode any manifestnode belongs to.
1647 # which changenode any manifestnode belongs to.
1669 def lookup_manifest_link(mnfstnode):
1648 def lookup_manifest_link(mnfstnode):
1670 return msng_mnfst_set[mnfstnode]
1649 return msng_mnfst_set[mnfstnode]
1671
1650
1672 # A function generating function that sets up the initial environment
1651 # A function generating function that sets up the initial environment
1673 # the inner function.
1652 # the inner function.
1674 def filenode_collector(changedfiles):
1653 def filenode_collector(changedfiles):
1675 next_rev = [0]
1654 next_rev = [0]
1676 # This gathers information from each manifestnode included in the
1655 # This gathers information from each manifestnode included in the
1677 # changegroup about which filenodes the manifest node references
1656 # changegroup about which filenodes the manifest node references
1678 # so we can include those in the changegroup too.
1657 # so we can include those in the changegroup too.
1679 #
1658 #
1680 # It also remembers which changenode each filenode belongs to. It
1659 # It also remembers which changenode each filenode belongs to. It
1681 # does this by assuming the a filenode belongs to the changenode
1660 # does this by assuming the a filenode belongs to the changenode
1682 # the first manifest that references it belongs to.
1661 # the first manifest that references it belongs to.
1683 def collect_msng_filenodes(mnfstnode):
1662 def collect_msng_filenodes(mnfstnode):
1684 r = mnfst.rev(mnfstnode)
1663 r = mnfst.rev(mnfstnode)
1685 if r == next_rev[0]:
1664 if r == next_rev[0]:
1686 # If the last rev we looked at was the one just previous,
1665 # If the last rev we looked at was the one just previous,
1687 # we only need to see a diff.
1666 # we only need to see a diff.
1688 deltamf = mnfst.readdelta(mnfstnode)
1667 deltamf = mnfst.readdelta(mnfstnode)
1689 # For each line in the delta
1668 # For each line in the delta
1690 for f, fnode in deltamf.items():
1669 for f, fnode in deltamf.items():
1691 f = changedfiles.get(f, None)
1670 f = changedfiles.get(f, None)
1692 # And if the file is in the list of files we care
1671 # And if the file is in the list of files we care
1693 # about.
1672 # about.
1694 if f is not None:
1673 if f is not None:
1695 # Get the changenode this manifest belongs to
1674 # Get the changenode this manifest belongs to
1696 clnode = msng_mnfst_set[mnfstnode]
1675 clnode = msng_mnfst_set[mnfstnode]
1697 # Create the set of filenodes for the file if
1676 # Create the set of filenodes for the file if
1698 # there isn't one already.
1677 # there isn't one already.
1699 ndset = msng_filenode_set.setdefault(f, {})
1678 ndset = msng_filenode_set.setdefault(f, {})
1700 # And set the filenode's changelog node to the
1679 # And set the filenode's changelog node to the
1701 # manifest's if it hasn't been set already.
1680 # manifest's if it hasn't been set already.
1702 ndset.setdefault(fnode, clnode)
1681 ndset.setdefault(fnode, clnode)
1703 else:
1682 else:
1704 # Otherwise we need a full manifest.
1683 # Otherwise we need a full manifest.
1705 m = mnfst.read(mnfstnode)
1684 m = mnfst.read(mnfstnode)
1706 # For every file in we care about.
1685 # For every file in we care about.
1707 for f in changedfiles:
1686 for f in changedfiles:
1708 fnode = m.get(f, None)
1687 fnode = m.get(f, None)
1709 # If it's in the manifest
1688 # If it's in the manifest
1710 if fnode is not None:
1689 if fnode is not None:
1711 # See comments above.
1690 # See comments above.
1712 clnode = msng_mnfst_set[mnfstnode]
1691 clnode = msng_mnfst_set[mnfstnode]
1713 ndset = msng_filenode_set.setdefault(f, {})
1692 ndset = msng_filenode_set.setdefault(f, {})
1714 ndset.setdefault(fnode, clnode)
1693 ndset.setdefault(fnode, clnode)
1715 # Remember the revision we hope to see next.
1694 # Remember the revision we hope to see next.
1716 next_rev[0] = r + 1
1695 next_rev[0] = r + 1
1717 return collect_msng_filenodes
1696 return collect_msng_filenodes
1718
1697
1719 # We have a list of filenodes we think we need for a file, lets remove
1698 # We have a list of filenodes we think we need for a file, lets remove
1720 # all those we now the recipient must have.
1699 # all those we now the recipient must have.
1721 def prune_filenodes(f, filerevlog):
1700 def prune_filenodes(f, filerevlog):
1722 msngset = msng_filenode_set[f]
1701 msngset = msng_filenode_set[f]
1723 hasset = {}
1702 hasset = {}
1724 # If a 'missing' filenode thinks it belongs to a changenode we
1703 # If a 'missing' filenode thinks it belongs to a changenode we
1725 # assume the recipient must have, then the recipient must have
1704 # assume the recipient must have, then the recipient must have
1726 # that filenode.
1705 # that filenode.
1727 for n in msngset:
1706 for n in msngset:
1728 clnode = cl.node(filerevlog.linkrev(n))
1707 clnode = cl.node(filerevlog.linkrev(n))
1729 if clnode in has_cl_set:
1708 if clnode in has_cl_set:
1730 hasset[n] = 1
1709 hasset[n] = 1
1731 prune_parents(filerevlog, hasset, msngset)
1710 prune_parents(filerevlog, hasset, msngset)
1732
1711
1733 # A function generator function that sets up the a context for the
1712 # A function generator function that sets up the a context for the
1734 # inner function.
1713 # inner function.
1735 def lookup_filenode_link_func(fname):
1714 def lookup_filenode_link_func(fname):
1736 msngset = msng_filenode_set[fname]
1715 msngset = msng_filenode_set[fname]
1737 # Lookup the changenode the filenode belongs to.
1716 # Lookup the changenode the filenode belongs to.
1738 def lookup_filenode_link(fnode):
1717 def lookup_filenode_link(fnode):
1739 return msngset[fnode]
1718 return msngset[fnode]
1740 return lookup_filenode_link
1719 return lookup_filenode_link
1741
1720
1742 # Add the nodes that were explicitly requested.
1721 # Add the nodes that were explicitly requested.
1743 def add_extra_nodes(name, nodes):
1722 def add_extra_nodes(name, nodes):
1744 if not extranodes or name not in extranodes:
1723 if not extranodes or name not in extranodes:
1745 return
1724 return
1746
1725
1747 for node, linknode in extranodes[name]:
1726 for node, linknode in extranodes[name]:
1748 if node not in nodes:
1727 if node not in nodes:
1749 nodes[node] = linknode
1728 nodes[node] = linknode
1750
1729
1751 # Now that we have all theses utility functions to help out and
1730 # Now that we have all theses utility functions to help out and
1752 # logically divide up the task, generate the group.
1731 # logically divide up the task, generate the group.
1753 def gengroup():
1732 def gengroup():
1754 # The set of changed files starts empty.
1733 # The set of changed files starts empty.
1755 changedfiles = {}
1734 changedfiles = {}
1756 # Create a changenode group generator that will call our functions
1735 # Create a changenode group generator that will call our functions
1757 # back to lookup the owning changenode and collect information.
1736 # back to lookup the owning changenode and collect information.
1758 group = cl.group(msng_cl_lst, identity,
1737 group = cl.group(msng_cl_lst, identity,
1759 manifest_and_file_collector(changedfiles))
1738 manifest_and_file_collector(changedfiles))
1760 for chnk in group:
1739 for chnk in group:
1761 yield chnk
1740 yield chnk
1762
1741
1763 # The list of manifests has been collected by the generator
1742 # The list of manifests has been collected by the generator
1764 # calling our functions back.
1743 # calling our functions back.
1765 prune_manifests()
1744 prune_manifests()
1766 add_extra_nodes(1, msng_mnfst_set)
1745 add_extra_nodes(1, msng_mnfst_set)
1767 msng_mnfst_lst = msng_mnfst_set.keys()
1746 msng_mnfst_lst = msng_mnfst_set.keys()
1768 # Sort the manifestnodes by revision number.
1747 # Sort the manifestnodes by revision number.
1769 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1748 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1770 # Create a generator for the manifestnodes that calls our lookup
1749 # Create a generator for the manifestnodes that calls our lookup
1771 # and data collection functions back.
1750 # and data collection functions back.
1772 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1751 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1773 filenode_collector(changedfiles))
1752 filenode_collector(changedfiles))
1774 for chnk in group:
1753 for chnk in group:
1775 yield chnk
1754 yield chnk
1776
1755
1777 # These are no longer needed, dereference and toss the memory for
1756 # These are no longer needed, dereference and toss the memory for
1778 # them.
1757 # them.
1779 msng_mnfst_lst = None
1758 msng_mnfst_lst = None
1780 msng_mnfst_set.clear()
1759 msng_mnfst_set.clear()
1781
1760
1782 if extranodes:
1761 if extranodes:
1783 for fname in extranodes:
1762 for fname in extranodes:
1784 if isinstance(fname, int):
1763 if isinstance(fname, int):
1785 continue
1764 continue
1786 add_extra_nodes(fname,
1765 add_extra_nodes(fname,
1787 msng_filenode_set.setdefault(fname, {}))
1766 msng_filenode_set.setdefault(fname, {}))
1788 changedfiles[fname] = 1
1767 changedfiles[fname] = 1
1789 # Go through all our files in order sorted by name.
1768 # Go through all our files in order sorted by name.
1790 for fname in util.sort(changedfiles):
1769 for fname in util.sort(changedfiles):
1791 filerevlog = self.file(fname)
1770 filerevlog = self.file(fname)
1792 if not len(filerevlog):
1771 if not len(filerevlog):
1793 raise util.Abort(_("empty or missing revlog for %s") % fname)
1772 raise util.Abort(_("empty or missing revlog for %s") % fname)
1794 # Toss out the filenodes that the recipient isn't really
1773 # Toss out the filenodes that the recipient isn't really
1795 # missing.
1774 # missing.
1796 if fname in msng_filenode_set:
1775 if fname in msng_filenode_set:
1797 prune_filenodes(fname, filerevlog)
1776 prune_filenodes(fname, filerevlog)
1798 msng_filenode_lst = msng_filenode_set[fname].keys()
1777 msng_filenode_lst = msng_filenode_set[fname].keys()
1799 else:
1778 else:
1800 msng_filenode_lst = []
1779 msng_filenode_lst = []
1801 # If any filenodes are left, generate the group for them,
1780 # If any filenodes are left, generate the group for them,
1802 # otherwise don't bother.
1781 # otherwise don't bother.
1803 if len(msng_filenode_lst) > 0:
1782 if len(msng_filenode_lst) > 0:
1804 yield changegroup.chunkheader(len(fname))
1783 yield changegroup.chunkheader(len(fname))
1805 yield fname
1784 yield fname
1806 # Sort the filenodes by their revision #
1785 # Sort the filenodes by their revision #
1807 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1786 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1808 # Create a group generator and only pass in a changenode
1787 # Create a group generator and only pass in a changenode
1809 # lookup function as we need to collect no information
1788 # lookup function as we need to collect no information
1810 # from filenodes.
1789 # from filenodes.
1811 group = filerevlog.group(msng_filenode_lst,
1790 group = filerevlog.group(msng_filenode_lst,
1812 lookup_filenode_link_func(fname))
1791 lookup_filenode_link_func(fname))
1813 for chnk in group:
1792 for chnk in group:
1814 yield chnk
1793 yield chnk
1815 if fname in msng_filenode_set:
1794 if fname in msng_filenode_set:
1816 # Don't need this anymore, toss it to free memory.
1795 # Don't need this anymore, toss it to free memory.
1817 del msng_filenode_set[fname]
1796 del msng_filenode_set[fname]
1818 # Signal that no more groups are left.
1797 # Signal that no more groups are left.
1819 yield changegroup.closechunk()
1798 yield changegroup.closechunk()
1820
1799
1821 if msng_cl_lst:
1800 if msng_cl_lst:
1822 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1801 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1823
1802
1824 return util.chunkbuffer(gengroup())
1803 return util.chunkbuffer(gengroup())
1825
1804
1826 def changegroup(self, basenodes, source):
1805 def changegroup(self, basenodes, source):
1827 """Generate a changegroup of all nodes that we have that a recipient
1806 """Generate a changegroup of all nodes that we have that a recipient
1828 doesn't.
1807 doesn't.
1829
1808
1830 This is much easier than the previous function as we can assume that
1809 This is much easier than the previous function as we can assume that
1831 the recipient has any changenode we aren't sending them."""
1810 the recipient has any changenode we aren't sending them."""
1832
1811
1833 self.hook('preoutgoing', throw=True, source=source)
1812 self.hook('preoutgoing', throw=True, source=source)
1834
1813
1835 cl = self.changelog
1814 cl = self.changelog
1836 nodes = cl.nodesbetween(basenodes, None)[0]
1815 nodes = cl.nodesbetween(basenodes, None)[0]
1837 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1816 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1838 self.changegroupinfo(nodes, source)
1817 self.changegroupinfo(nodes, source)
1839
1818
1840 def identity(x):
1819 def identity(x):
1841 return x
1820 return x
1842
1821
1843 def gennodelst(log):
1822 def gennodelst(log):
1844 for r in log:
1823 for r in log:
1845 n = log.node(r)
1824 n = log.node(r)
1846 if log.linkrev(n) in revset:
1825 if log.linkrev(n) in revset:
1847 yield n
1826 yield n
1848
1827
1849 def changed_file_collector(changedfileset):
1828 def changed_file_collector(changedfileset):
1850 def collect_changed_files(clnode):
1829 def collect_changed_files(clnode):
1851 c = cl.read(clnode)
1830 c = cl.read(clnode)
1852 for fname in c[3]:
1831 for fname in c[3]:
1853 changedfileset[fname] = 1
1832 changedfileset[fname] = 1
1854 return collect_changed_files
1833 return collect_changed_files
1855
1834
1856 def lookuprevlink_func(revlog):
1835 def lookuprevlink_func(revlog):
1857 def lookuprevlink(n):
1836 def lookuprevlink(n):
1858 return cl.node(revlog.linkrev(n))
1837 return cl.node(revlog.linkrev(n))
1859 return lookuprevlink
1838 return lookuprevlink
1860
1839
1861 def gengroup():
1840 def gengroup():
1862 # construct a list of all changed files
1841 # construct a list of all changed files
1863 changedfiles = {}
1842 changedfiles = {}
1864
1843
1865 for chnk in cl.group(nodes, identity,
1844 for chnk in cl.group(nodes, identity,
1866 changed_file_collector(changedfiles)):
1845 changed_file_collector(changedfiles)):
1867 yield chnk
1846 yield chnk
1868
1847
1869 mnfst = self.manifest
1848 mnfst = self.manifest
1870 nodeiter = gennodelst(mnfst)
1849 nodeiter = gennodelst(mnfst)
1871 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1850 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1872 yield chnk
1851 yield chnk
1873
1852
1874 for fname in util.sort(changedfiles):
1853 for fname in util.sort(changedfiles):
1875 filerevlog = self.file(fname)
1854 filerevlog = self.file(fname)
1876 if not len(filerevlog):
1855 if not len(filerevlog):
1877 raise util.Abort(_("empty or missing revlog for %s") % fname)
1856 raise util.Abort(_("empty or missing revlog for %s") % fname)
1878 nodeiter = gennodelst(filerevlog)
1857 nodeiter = gennodelst(filerevlog)
1879 nodeiter = list(nodeiter)
1858 nodeiter = list(nodeiter)
1880 if nodeiter:
1859 if nodeiter:
1881 yield changegroup.chunkheader(len(fname))
1860 yield changegroup.chunkheader(len(fname))
1882 yield fname
1861 yield fname
1883 lookup = lookuprevlink_func(filerevlog)
1862 lookup = lookuprevlink_func(filerevlog)
1884 for chnk in filerevlog.group(nodeiter, lookup):
1863 for chnk in filerevlog.group(nodeiter, lookup):
1885 yield chnk
1864 yield chnk
1886
1865
1887 yield changegroup.closechunk()
1866 yield changegroup.closechunk()
1888
1867
1889 if nodes:
1868 if nodes:
1890 self.hook('outgoing', node=hex(nodes[0]), source=source)
1869 self.hook('outgoing', node=hex(nodes[0]), source=source)
1891
1870
1892 return util.chunkbuffer(gengroup())
1871 return util.chunkbuffer(gengroup())
1893
1872
1894 def addchangegroup(self, source, srctype, url, emptyok=False):
1873 def addchangegroup(self, source, srctype, url, emptyok=False):
1895 """add changegroup to repo.
1874 """add changegroup to repo.
1896
1875
1897 return values:
1876 return values:
1898 - nothing changed or no source: 0
1877 - nothing changed or no source: 0
1899 - more heads than before: 1+added heads (2..n)
1878 - more heads than before: 1+added heads (2..n)
1900 - less heads than before: -1-removed heads (-2..-n)
1879 - less heads than before: -1-removed heads (-2..-n)
1901 - number of heads stays the same: 1
1880 - number of heads stays the same: 1
1902 """
1881 """
1903 def csmap(x):
1882 def csmap(x):
1904 self.ui.debug(_("add changeset %s\n") % short(x))
1883 self.ui.debug(_("add changeset %s\n") % short(x))
1905 return len(cl)
1884 return len(cl)
1906
1885
1907 def revmap(x):
1886 def revmap(x):
1908 return cl.rev(x)
1887 return cl.rev(x)
1909
1888
1910 if not source:
1889 if not source:
1911 return 0
1890 return 0
1912
1891
1913 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1892 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1914
1893
1915 changesets = files = revisions = 0
1894 changesets = files = revisions = 0
1916
1895
1917 # write changelog data to temp files so concurrent readers will not see
1896 # write changelog data to temp files so concurrent readers will not see
1918 # inconsistent view
1897 # inconsistent view
1919 cl = self.changelog
1898 cl = self.changelog
1920 cl.delayupdate()
1899 cl.delayupdate()
1921 oldheads = len(cl.heads())
1900 oldheads = len(cl.heads())
1922
1901
1923 tr = self.transaction()
1902 tr = self.transaction()
1924 try:
1903 try:
1925 trp = weakref.proxy(tr)
1904 trp = weakref.proxy(tr)
1926 # pull off the changeset group
1905 # pull off the changeset group
1927 self.ui.status(_("adding changesets\n"))
1906 self.ui.status(_("adding changesets\n"))
1928 cor = len(cl) - 1
1907 cor = len(cl) - 1
1929 chunkiter = changegroup.chunkiter(source)
1908 chunkiter = changegroup.chunkiter(source)
1930 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1909 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1931 raise util.Abort(_("received changelog group is empty"))
1910 raise util.Abort(_("received changelog group is empty"))
1932 cnr = len(cl) - 1
1911 cnr = len(cl) - 1
1933 changesets = cnr - cor
1912 changesets = cnr - cor
1934
1913
1935 # pull off the manifest group
1914 # pull off the manifest group
1936 self.ui.status(_("adding manifests\n"))
1915 self.ui.status(_("adding manifests\n"))
1937 chunkiter = changegroup.chunkiter(source)
1916 chunkiter = changegroup.chunkiter(source)
1938 # no need to check for empty manifest group here:
1917 # no need to check for empty manifest group here:
1939 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1918 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1940 # no new manifest will be created and the manifest group will
1919 # no new manifest will be created and the manifest group will
1941 # be empty during the pull
1920 # be empty during the pull
1942 self.manifest.addgroup(chunkiter, revmap, trp)
1921 self.manifest.addgroup(chunkiter, revmap, trp)
1943
1922
1944 # process the files
1923 # process the files
1945 self.ui.status(_("adding file changes\n"))
1924 self.ui.status(_("adding file changes\n"))
1946 while 1:
1925 while 1:
1947 f = changegroup.getchunk(source)
1926 f = changegroup.getchunk(source)
1948 if not f:
1927 if not f:
1949 break
1928 break
1950 self.ui.debug(_("adding %s revisions\n") % f)
1929 self.ui.debug(_("adding %s revisions\n") % f)
1951 fl = self.file(f)
1930 fl = self.file(f)
1952 o = len(fl)
1931 o = len(fl)
1953 chunkiter = changegroup.chunkiter(source)
1932 chunkiter = changegroup.chunkiter(source)
1954 if fl.addgroup(chunkiter, revmap, trp) is None:
1933 if fl.addgroup(chunkiter, revmap, trp) is None:
1955 raise util.Abort(_("received file revlog group is empty"))
1934 raise util.Abort(_("received file revlog group is empty"))
1956 revisions += len(fl) - o
1935 revisions += len(fl) - o
1957 files += 1
1936 files += 1
1958
1937
1959 # make changelog see real files again
1938 # make changelog see real files again
1960 cl.finalize(trp)
1939 cl.finalize(trp)
1961
1940
1962 newheads = len(self.changelog.heads())
1941 newheads = len(self.changelog.heads())
1963 heads = ""
1942 heads = ""
1964 if oldheads and newheads != oldheads:
1943 if oldheads and newheads != oldheads:
1965 heads = _(" (%+d heads)") % (newheads - oldheads)
1944 heads = _(" (%+d heads)") % (newheads - oldheads)
1966
1945
1967 self.ui.status(_("added %d changesets"
1946 self.ui.status(_("added %d changesets"
1968 " with %d changes to %d files%s\n")
1947 " with %d changes to %d files%s\n")
1969 % (changesets, revisions, files, heads))
1948 % (changesets, revisions, files, heads))
1970
1949
1971 if changesets > 0:
1950 if changesets > 0:
1972 self.hook('pretxnchangegroup', throw=True,
1951 self.hook('pretxnchangegroup', throw=True,
1973 node=hex(self.changelog.node(cor+1)), source=srctype,
1952 node=hex(self.changelog.node(cor+1)), source=srctype,
1974 url=url)
1953 url=url)
1975
1954
1976 tr.close()
1955 tr.close()
1977 finally:
1956 finally:
1978 del tr
1957 del tr
1979
1958
1980 if changesets > 0:
1959 if changesets > 0:
1981 # forcefully update the on-disk branch cache
1960 # forcefully update the on-disk branch cache
1982 self.ui.debug(_("updating the branch cache\n"))
1961 self.ui.debug(_("updating the branch cache\n"))
1983 self.branchtags()
1962 self.branchtags()
1984 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1963 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1985 source=srctype, url=url)
1964 source=srctype, url=url)
1986
1965
1987 for i in xrange(cor + 1, cnr + 1):
1966 for i in xrange(cor + 1, cnr + 1):
1988 self.hook("incoming", node=hex(self.changelog.node(i)),
1967 self.hook("incoming", node=hex(self.changelog.node(i)),
1989 source=srctype, url=url)
1968 source=srctype, url=url)
1990
1969
1991 # never return 0 here:
1970 # never return 0 here:
1992 if newheads < oldheads:
1971 if newheads < oldheads:
1993 return newheads - oldheads - 1
1972 return newheads - oldheads - 1
1994 else:
1973 else:
1995 return newheads - oldheads + 1
1974 return newheads - oldheads + 1
1996
1975
1997
1976
1998 def stream_in(self, remote):
1977 def stream_in(self, remote):
1999 fp = remote.stream_out()
1978 fp = remote.stream_out()
2000 l = fp.readline()
1979 l = fp.readline()
2001 try:
1980 try:
2002 resp = int(l)
1981 resp = int(l)
2003 except ValueError:
1982 except ValueError:
2004 raise util.UnexpectedOutput(
1983 raise util.UnexpectedOutput(
2005 _('Unexpected response from remote server:'), l)
1984 _('Unexpected response from remote server:'), l)
2006 if resp == 1:
1985 if resp == 1:
2007 raise util.Abort(_('operation forbidden by server'))
1986 raise util.Abort(_('operation forbidden by server'))
2008 elif resp == 2:
1987 elif resp == 2:
2009 raise util.Abort(_('locking the remote repository failed'))
1988 raise util.Abort(_('locking the remote repository failed'))
2010 elif resp != 0:
1989 elif resp != 0:
2011 raise util.Abort(_('the server sent an unknown error code'))
1990 raise util.Abort(_('the server sent an unknown error code'))
2012 self.ui.status(_('streaming all changes\n'))
1991 self.ui.status(_('streaming all changes\n'))
2013 l = fp.readline()
1992 l = fp.readline()
2014 try:
1993 try:
2015 total_files, total_bytes = map(int, l.split(' ', 1))
1994 total_files, total_bytes = map(int, l.split(' ', 1))
2016 except (ValueError, TypeError):
1995 except (ValueError, TypeError):
2017 raise util.UnexpectedOutput(
1996 raise util.UnexpectedOutput(
2018 _('Unexpected response from remote server:'), l)
1997 _('Unexpected response from remote server:'), l)
2019 self.ui.status(_('%d files to transfer, %s of data\n') %
1998 self.ui.status(_('%d files to transfer, %s of data\n') %
2020 (total_files, util.bytecount(total_bytes)))
1999 (total_files, util.bytecount(total_bytes)))
2021 start = time.time()
2000 start = time.time()
2022 for i in xrange(total_files):
2001 for i in xrange(total_files):
2023 # XXX doesn't support '\n' or '\r' in filenames
2002 # XXX doesn't support '\n' or '\r' in filenames
2024 l = fp.readline()
2003 l = fp.readline()
2025 try:
2004 try:
2026 name, size = l.split('\0', 1)
2005 name, size = l.split('\0', 1)
2027 size = int(size)
2006 size = int(size)
2028 except ValueError, TypeError:
2007 except ValueError, TypeError:
2029 raise util.UnexpectedOutput(
2008 raise util.UnexpectedOutput(
2030 _('Unexpected response from remote server:'), l)
2009 _('Unexpected response from remote server:'), l)
2031 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2010 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2032 ofp = self.sopener(name, 'w')
2011 ofp = self.sopener(name, 'w')
2033 for chunk in util.filechunkiter(fp, limit=size):
2012 for chunk in util.filechunkiter(fp, limit=size):
2034 ofp.write(chunk)
2013 ofp.write(chunk)
2035 ofp.close()
2014 ofp.close()
2036 elapsed = time.time() - start
2015 elapsed = time.time() - start
2037 if elapsed <= 0:
2016 if elapsed <= 0:
2038 elapsed = 0.001
2017 elapsed = 0.001
2039 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2018 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2040 (util.bytecount(total_bytes), elapsed,
2019 (util.bytecount(total_bytes), elapsed,
2041 util.bytecount(total_bytes / elapsed)))
2020 util.bytecount(total_bytes / elapsed)))
2042 self.invalidate()
2021 self.invalidate()
2043 return len(self.heads()) + 1
2022 return len(self.heads()) + 1
2044
2023
2045 def clone(self, remote, heads=[], stream=False):
2024 def clone(self, remote, heads=[], stream=False):
2046 '''clone remote repository.
2025 '''clone remote repository.
2047
2026
2048 keyword arguments:
2027 keyword arguments:
2049 heads: list of revs to clone (forces use of pull)
2028 heads: list of revs to clone (forces use of pull)
2050 stream: use streaming clone if possible'''
2029 stream: use streaming clone if possible'''
2051
2030
2052 # now, all clients that can request uncompressed clones can
2031 # now, all clients that can request uncompressed clones can
2053 # read repo formats supported by all servers that can serve
2032 # read repo formats supported by all servers that can serve
2054 # them.
2033 # them.
2055
2034
2056 # if revlog format changes, client will have to check version
2035 # if revlog format changes, client will have to check version
2057 # and format flags on "stream" capability, and use
2036 # and format flags on "stream" capability, and use
2058 # uncompressed only if compatible.
2037 # uncompressed only if compatible.
2059
2038
2060 if stream and not heads and remote.capable('stream'):
2039 if stream and not heads and remote.capable('stream'):
2061 return self.stream_in(remote)
2040 return self.stream_in(remote)
2062 return self.pull(remote, heads)
2041 return self.pull(remote, heads)
2063
2042
2043 def storefiles(self):
2044 '''get all *.i and *.d files in the store
2045
2046 Returns (list of (filename, size), total_bytes)'''
2047
2048 lock = None
2049 try:
2050 self.ui.debug('scanning\n')
2051 entries = []
2052 total_bytes = 0
2053 # get consistent snapshot of repo, lock during scan
2054 lock = self.lock()
2055 for name, size in self.store.walk():
2056 entries.append((name, size))
2057 total_bytes += size
2058 return entries, total_bytes
2059 finally:
2060 del lock
2061
2064 # used to avoid circular references so destructors work
2062 # used to avoid circular references so destructors work
2065 def aftertrans(files):
2063 def aftertrans(files):
2066 renamefiles = [tuple(t) for t in files]
2064 renamefiles = [tuple(t) for t in files]
2067 def a():
2065 def a():
2068 for src, dest in renamefiles:
2066 for src, dest in renamefiles:
2069 util.rename(src, dest)
2067 util.rename(src, dest)
2070 return a
2068 return a
2071
2069
2072 def instance(ui, path, create):
2070 def instance(ui, path, create):
2073 return localrepository(ui, util.drop_scheme('file', path), create)
2071 return localrepository(ui, util.drop_scheme('file', path), create)
2074
2072
2075 def islocal(path):
2073 def islocal(path):
2076 return True
2074 return True
@@ -1,83 +1,82
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms
7 # This software may be used and distributed according to the terms
8 # of the GNU General Public License, incorporated herein by reference.
8 # of the GNU General Public License, incorporated herein by reference.
9
9
10 from i18n import _
10 from i18n import _
11 import changelog, httprangereader
11 import changelog, httprangereader
12 import repo, localrepo, manifest, util, store
12 import repo, localrepo, manifest, util, store
13 import urllib, urllib2, errno
13 import urllib, urllib2, errno
14
14
15 class rangereader(httprangereader.httprangereader):
15 class rangereader(httprangereader.httprangereader):
16 def read(self, size=None):
16 def read(self, size=None):
17 try:
17 try:
18 return httprangereader.httprangereader.read(self, size)
18 return httprangereader.httprangereader.read(self, size)
19 except urllib2.HTTPError, inst:
19 except urllib2.HTTPError, inst:
20 num = inst.code == 404 and errno.ENOENT or None
20 num = inst.code == 404 and errno.ENOENT or None
21 raise IOError(num, inst)
21 raise IOError(num, inst)
22 except urllib2.URLError, inst:
22 except urllib2.URLError, inst:
23 raise IOError(None, inst.reason[1])
23 raise IOError(None, inst.reason[1])
24
24
25 def opener(base):
25 def opener(base):
26 """return a function that opens files over http"""
26 """return a function that opens files over http"""
27 p = base
27 p = base
28 def o(path, mode="r"):
28 def o(path, mode="r"):
29 f = "/".join((p, urllib.quote(path)))
29 f = "/".join((p, urllib.quote(path)))
30 return rangereader(f)
30 return rangereader(f)
31 return o
31 return o
32
32
33 class statichttprepository(localrepo.localrepository):
33 class statichttprepository(localrepo.localrepository):
34 def __init__(self, ui, path):
34 def __init__(self, ui, path):
35 self._url = path
35 self._url = path
36 self.ui = ui
36 self.ui = ui
37
37
38 self.path = path.rstrip('/') + "/.hg"
38 self.path = path.rstrip('/') + "/.hg"
39 self.opener = opener(self.path)
39 self.opener = opener(self.path)
40
40
41 # find requirements
41 # find requirements
42 try:
42 try:
43 requirements = self.opener("requires").read().splitlines()
43 requirements = self.opener("requires").read().splitlines()
44 except IOError, inst:
44 except IOError, inst:
45 if inst.errno == errno.ENOENT:
45 if inst.errno == errno.ENOENT:
46 msg = _("'%s' does not appear to be an hg repository") % path
46 msg = _("'%s' does not appear to be an hg repository") % path
47 raise repo.RepoError(msg)
47 raise repo.RepoError(msg)
48 else:
48 else:
49 requirements = []
49 requirements = []
50
50
51 # check them
51 # check them
52 for r in requirements:
52 for r in requirements:
53 if r not in self.supported:
53 if r not in self.supported:
54 raise repo.RepoError(_("requirement '%s' not supported") % r)
54 raise repo.RepoError(_("requirement '%s' not supported") % r)
55
55
56 # setup store
56 # setup store
57 if "store" in requirements:
57 if "store" in requirements:
58 self.encodefn = store.encodefilename
59 self.decodefn = store.decodefilename
60 self.spath = self.path + "/store"
58 self.spath = self.path + "/store"
61 else:
59 else:
62 self.encodefn = lambda x: x
63 self.decodefn = lambda x: x
64 self.spath = self.path
60 self.spath = self.path
65 self.sopener = store.encodedopener(opener(self.spath), self.encodefn)
61 self.encodefn = store.encodefn(requirements)
62 so = opener(self.spath)
63 self.sopener = lambda path, *args, **kw: so(
64 self.encodefn(path), *args, **kw)
66
65
67 self.manifest = manifest.manifest(self.sopener)
66 self.manifest = manifest.manifest(self.sopener)
68 self.changelog = changelog.changelog(self.sopener)
67 self.changelog = changelog.changelog(self.sopener)
69 self.tagscache = None
68 self.tagscache = None
70 self.nodetagscache = None
69 self.nodetagscache = None
71 self.encodepats = None
70 self.encodepats = None
72 self.decodepats = None
71 self.decodepats = None
73
72
74 def url(self):
73 def url(self):
75 return 'static-' + self._url
74 return 'static-' + self._url
76
75
77 def local(self):
76 def local(self):
78 return False
77 return False
79
78
80 def instance(ui, path, create):
79 def instance(ui, path, create):
81 if create:
80 if create:
82 raise util.Abort(_('cannot create new static-http repository'))
81 raise util.Abort(_('cannot create new static-http repository'))
83 return statichttprepository(ui, path[7:])
82 return statichttprepository(ui, path[7:])
@@ -1,39 +1,125
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, stat, osutil, util
9
8 def _buildencodefun():
10 def _buildencodefun():
9 e = '_'
11 e = '_'
10 win_reserved = [ord(x) for x in '\\:*?"<>|']
12 win_reserved = [ord(x) for x in '\\:*?"<>|']
11 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
13 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
12 for x in (range(32) + range(126, 256) + win_reserved):
14 for x in (range(32) + range(126, 256) + win_reserved):
13 cmap[chr(x)] = "~%02x" % x
15 cmap[chr(x)] = "~%02x" % x
14 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
16 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
15 cmap[chr(x)] = e + chr(x).lower()
17 cmap[chr(x)] = e + chr(x).lower()
16 dmap = {}
18 dmap = {}
17 for k, v in cmap.iteritems():
19 for k, v in cmap.iteritems():
18 dmap[v] = k
20 dmap[v] = k
19 def decode(s):
21 def decode(s):
20 i = 0
22 i = 0
21 while i < len(s):
23 while i < len(s):
22 for l in xrange(1, 4):
24 for l in xrange(1, 4):
23 try:
25 try:
24 yield dmap[s[i:i+l]]
26 yield dmap[s[i:i+l]]
25 i += l
27 i += l
26 break
28 break
27 except KeyError:
29 except KeyError:
28 pass
30 pass
29 else:
31 else:
30 raise KeyError
32 raise KeyError
31 return (lambda s: "".join([cmap[c] for c in s]),
33 return (lambda s: "".join([cmap[c] for c in s]),
32 lambda s: "".join(list(decode(s))))
34 lambda s: "".join(list(decode(s))))
33
35
34 encodefilename, decodefilename = _buildencodefun()
36 encodefilename, decodefilename = _buildencodefun()
35
37
36 def encodedopener(openerfn, fn):
38 def _dirwalk(path, recurse):
37 def o(path, *args, **kw):
39 '''yields (filename, size)'''
38 return openerfn(fn(path), *args, **kw)
40 for e, kind, st in osutil.listdir(path, stat=True):
39 return o
41 pe = os.path.join(path, e)
42 if kind == stat.S_IFDIR:
43 if recurse:
44 for x in _dirwalk(pe, True):
45 yield x
46 elif kind == stat.S_IFREG:
47 yield pe, st.st_size
48
49 class _store:
50 '''base class for local repository stores'''
51 def __init__(self, path):
52 self.path = path
53 try:
54 # files in .hg/ will be created using this mode
55 mode = os.stat(self.path).st_mode
56 # avoid some useless chmods
57 if (0777 & ~util._umask) == (0777 & mode):
58 mode = None
59 except OSError:
60 mode = None
61 self.createmode = mode
62
63 def join(self, f):
64 return os.path.join(self.path, f)
65
66 def _revlogfiles(self, relpath='', recurse=False):
67 '''yields (filename, size)'''
68 if relpath:
69 path = os.path.join(self.path, relpath)
70 else:
71 path = self.path
72 striplen = len(self.path) + len(os.sep)
73 filetypes = ('.d', '.i')
74 for f, size in _dirwalk(path, recurse):
75 if (len(f) > 2) and f[-2:] in filetypes:
76 yield util.pconvert(f[striplen:]), size
77
78 def _datafiles(self):
79 for x in self._revlogfiles('data', True):
80 yield x
81
82 def walk(self):
83 '''yields (direncoded filename, size)'''
84 # yield data files first
85 for x in self._datafiles():
86 yield x
87 # yield manifest before changelog
88 meta = util.sort(self._revlogfiles())
89 meta.reverse()
90 for x in meta:
91 yield x
92
93 class directstore(_store):
94 def __init__(self, path):
95 _store.__init__(self, path)
96 self.encodefn = lambda x: x
97 self.opener = util.opener(self.path)
98 self.opener.createmode = self.createmode
99
100 class encodedstore(_store):
101 def __init__(self, path):
102 _store.__init__(self, os.path.join(path, 'store'))
103 self.encodefn = encodefilename
104 op = util.opener(self.path)
105 op.createmode = self.createmode
106 self.opener = lambda f, *args, **kw: op(self.encodefn(f), *args, **kw)
107
108 def _datafiles(self):
109 for f, size in self._revlogfiles('data', True):
110 yield decodefilename(f), size
111
112 def join(self, f):
113 return os.path.join(self.path, self.encodefn(f))
114
115 def encodefn(requirements):
116 if 'store' not in requirements:
117 return lambda x: x
118 else:
119 return encodefilename
120
121 def store(requirements, path):
122 if 'store' not in requirements:
123 return directstore(path)
124 else:
125 return encodedstore(path)
@@ -1,93 +1,51
1 # streamclone.py - streaming clone server support for mercurial
1 # streamclone.py - streaming clone server support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, osutil, stat, util, lock
8 import util, lock
9
9
10 # if server supports streaming clone, it advertises "stream"
10 # if server supports streaming clone, it advertises "stream"
11 # capability with value that is version+flags of repo it is serving.
11 # capability with value that is version+flags of repo it is serving.
12 # client only streams if it can read that repo format.
12 # client only streams if it can read that repo format.
13
13
14 def walkrepo(root):
15 '''iterate over metadata files in repository.
16 walk in natural (sorted) order.
17 yields 2-tuples: name of .d or .i file, size of file.'''
18
19 strip_count = len(root) + len(os.sep)
20 def walk(path, recurse):
21 for e, kind, st in osutil.listdir(path, stat=True):
22 pe = os.path.join(path, e)
23 if kind == stat.S_IFDIR:
24 if recurse:
25 for x in walk(pe, True):
26 yield x
27 else:
28 if kind != stat.S_IFREG or len(e) < 2:
29 continue
30 sfx = e[-2:]
31 if sfx in ('.d', '.i'):
32 yield pe[strip_count:], st.st_size
33 # write file data first
34 for x in walk(os.path.join(root, 'data'), True):
35 yield x
36 # write manifest before changelog
37 meta = util.sort(walk(root, False))
38 meta.reverse()
39 for x in meta:
40 yield x
41
42 # stream file format is simple.
14 # stream file format is simple.
43 #
15 #
44 # server writes out line that says how many files, how many total
16 # server writes out line that says how many files, how many total
45 # bytes. separator is ascii space, byte counts are strings.
17 # bytes. separator is ascii space, byte counts are strings.
46 #
18 #
47 # then for each file:
19 # then for each file:
48 #
20 #
49 # server writes out line that says file name, how many bytes in
21 # server writes out line that says file name, how many bytes in
50 # file. separator is ascii nul, byte count is string.
22 # file. separator is ascii nul, byte count is string.
51 #
23 #
52 # server writes out raw file data.
24 # server writes out raw file data.
53
25
54 def stream_out(repo, fileobj, untrusted=False):
26 def stream_out(repo, fileobj, untrusted=False):
55 '''stream out all metadata files in repository.
27 '''stream out all metadata files in repository.
56 writes to file-like object, must support write() and optional flush().'''
28 writes to file-like object, must support write() and optional flush().'''
57
29
58 if not repo.ui.configbool('server', 'uncompressed', untrusted=untrusted):
30 if not repo.ui.configbool('server', 'uncompressed', untrusted=untrusted):
59 fileobj.write('1\n')
31 fileobj.write('1\n')
60 return
32 return
61
33
62 # get consistent snapshot of repo. lock during scan so lock not
63 # needed while we stream, and commits can happen.
64 repolock = None
65 try:
34 try:
66 try:
35 entries, total_bytes = repo.storefiles()
67 repolock = repo.lock()
36 except (lock.LockHeld, lock.LockUnavailable), inst:
68 except (lock.LockHeld, lock.LockUnavailable), inst:
37 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
69 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
38 fileobj.write('2\n')
70 fileobj.write('2\n')
39 return
71 return
72
40
73 fileobj.write('0\n')
41 fileobj.write('0\n')
74 repo.ui.debug('scanning\n')
75 entries = []
76 total_bytes = 0
77 for name, size in walkrepo(repo.spath):
78 name = repo.decodefn(util.pconvert(name))
79 entries.append((name, size))
80 total_bytes += size
81 finally:
82 del repolock
83
84 repo.ui.debug('%d files, %d bytes to transfer\n' %
42 repo.ui.debug('%d files, %d bytes to transfer\n' %
85 (len(entries), total_bytes))
43 (len(entries), total_bytes))
86 fileobj.write('%d %d\n' % (len(entries), total_bytes))
44 fileobj.write('%d %d\n' % (len(entries), total_bytes))
87 for name, size in entries:
45 for name, size in entries:
88 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
46 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
89 fileobj.write('%s\0%d\n' % (name, size))
47 fileobj.write('%s\0%d\n' % (name, size))
90 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
48 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
91 fileobj.write(chunk)
49 fileobj.write(chunk)
92 flush = getattr(fileobj, 'flush', None)
50 flush = getattr(fileobj, 'flush', None)
93 if flush: flush()
51 if flush: flush()
General Comments 0
You need to be logged in to leave comments. Login now