manifest: move pure parsing code out of pure...
Matt Mackall
r24215:feddc528 default
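The changeset touches two files. In manifest.py (first hunk) a pure-Python fallback parser, `_parsemanifest`, is added, and `_parse` now tries `parsers.parse_manifest` first and falls back when that attribute is missing. In parsers.py, the pure-Python implementation of parsers.c (second hunk), the old copy of `parse_manifest` is removed. The sketch below restates that fallback pattern outside Mercurial; `parse_with_fallback`, `_parsemanifest_pure`, and the `binascii` decoding are illustrative assumptions, not the project's API — only the manifest line format and the AttributeError fallback mirror the diff.

```python
# Minimal sketch (not Mercurial code) of the fallback this changeset adds.
import binascii

def _parsemanifest_pure(mfdict, flagdict, data):
    # Each manifest line is "<path>\0<40-hex-node><flags>\n"; anything after
    # the 40 hex digits is the flags field ("x", "l", ...).
    for line in data.splitlines():
        path, rest = line.split('\0')
        if len(rest) > 40:
            flagdict[path] = rest[40:]
            mfdict[path] = binascii.unhexlify(rest[:40])
        else:
            mfdict[path] = binascii.unhexlify(rest)

def parse_with_fallback(parsers_mod, data):
    mfdict, flagdict = {}, {}
    try:
        # Fast path: a parsers module that exposes parse_manifest.
        parsers_mod.parse_manifest(mfdict, flagdict, data)
    except AttributeError:
        # Pure-Python builds may not provide it; parse in Python instead.
        _parsemanifest_pure(mfdict, flagdict, data)
    return mfdict, flagdict
```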
@@ -1,304 +1,318 @@
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import mdiff, parsers, error, revlog, util
9 import mdiff, parsers, error, revlog, util
10 import array, struct
10 import array, struct
11
11
12 class manifestdict(dict):
12 class manifestdict(dict):
13 def __init__(self):
13 def __init__(self):
14 self._flags = {}
14 self._flags = {}
15 def __setitem__(self, k, v):
15 def __setitem__(self, k, v):
16 assert v is not None
16 assert v is not None
17 dict.__setitem__(self, k, v)
17 dict.__setitem__(self, k, v)
18 def flags(self, f):
18 def flags(self, f):
19 return self._flags.get(f, "")
19 return self._flags.get(f, "")
20 def setflag(self, f, flags):
20 def setflag(self, f, flags):
21 """Set the flags (symlink, executable) for path f."""
21 """Set the flags (symlink, executable) for path f."""
22 self._flags[f] = flags
22 self._flags[f] = flags
23 def copy(self):
23 def copy(self):
24 copy = manifestdict()
24 copy = manifestdict()
25 dict.__init__(copy, self)
25 dict.__init__(copy, self)
26 copy._flags = dict.copy(self._flags)
26 copy._flags = dict.copy(self._flags)
27 return copy
27 return copy
28 def intersectfiles(self, files):
28 def intersectfiles(self, files):
29 '''make a new manifestdict with the intersection of self with files
29 '''make a new manifestdict with the intersection of self with files
30
30
31 The algorithm assumes that files is much smaller than self.'''
31 The algorithm assumes that files is much smaller than self.'''
32 ret = manifestdict()
32 ret = manifestdict()
33 for fn in files:
33 for fn in files:
34 if fn in self:
34 if fn in self:
35 ret[fn] = self[fn]
35 ret[fn] = self[fn]
36 flags = self._flags.get(fn, None)
36 flags = self._flags.get(fn, None)
37 if flags:
37 if flags:
38 ret._flags[fn] = flags
38 ret._flags[fn] = flags
39 return ret
39 return ret
40
40
41 def filesnotin(self, m2):
41 def filesnotin(self, m2):
42 '''Set of files in this manifest that are not in the other'''
42 '''Set of files in this manifest that are not in the other'''
43 files = set(self.iterkeys())
43 files = set(self.iterkeys())
44 files.difference_update(m2.iterkeys())
44 files.difference_update(m2.iterkeys())
45 return files
45 return files
46
46
47 def matches(self, match):
47 def matches(self, match):
48 '''generate a new manifest filtered by the match argument'''
48 '''generate a new manifest filtered by the match argument'''
49 if match.always():
49 if match.always():
50 return self.copy()
50 return self.copy()
51
51
52 files = match.files()
52 files = match.files()
53 if (match.matchfn == match.exact or
53 if (match.matchfn == match.exact or
54 (not match.anypats() and util.all(fn in self for fn in files))):
54 (not match.anypats() and util.all(fn in self for fn in files))):
55 return self.intersectfiles(files)
55 return self.intersectfiles(files)
56
56
57 m = self.copy()
57 m = self.copy()
58 for fn in m.keys():
58 for fn in m.keys():
59 if not match(fn):
59 if not match(fn):
60 del m[fn]
60 del m[fn]
61 return m
61 return m
62
62
63 def diff(self, m2, clean=False):
63 def diff(self, m2, clean=False):
64 '''Finds changes between the current manifest and m2.
64 '''Finds changes between the current manifest and m2.
65
65
66 Args:
66 Args:
67 m2: the manifest to which this manifest should be compared.
67 m2: the manifest to which this manifest should be compared.
68 clean: if true, include files unchanged between these manifests
68 clean: if true, include files unchanged between these manifests
69 with a None value in the returned dictionary.
69 with a None value in the returned dictionary.
70
70
71 The result is returned as a dict with filename as key and
71 The result is returned as a dict with filename as key and
72 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
72 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
73 nodeid in the current/other manifest and fl1/fl2 is the flag
73 nodeid in the current/other manifest and fl1/fl2 is the flag
74 in the current/other manifest. Where the file does not exist,
74 in the current/other manifest. Where the file does not exist,
75 the nodeid will be None and the flags will be the empty
75 the nodeid will be None and the flags will be the empty
76 string.
76 string.
77 '''
77 '''
78 diff = {}
78 diff = {}
79
79
80 for fn, n1 in self.iteritems():
80 for fn, n1 in self.iteritems():
81 fl1 = self._flags.get(fn, '')
81 fl1 = self._flags.get(fn, '')
82 n2 = m2.get(fn, None)
82 n2 = m2.get(fn, None)
83 fl2 = m2._flags.get(fn, '')
83 fl2 = m2._flags.get(fn, '')
84 if n2 is None:
84 if n2 is None:
85 fl2 = ''
85 fl2 = ''
86 if n1 != n2 or fl1 != fl2:
86 if n1 != n2 or fl1 != fl2:
87 diff[fn] = ((n1, fl1), (n2, fl2))
87 diff[fn] = ((n1, fl1), (n2, fl2))
88 elif clean:
88 elif clean:
89 diff[fn] = None
89 diff[fn] = None
90
90
91 for fn, n2 in m2.iteritems():
91 for fn, n2 in m2.iteritems():
92 if fn not in self:
92 if fn not in self:
93 fl2 = m2._flags.get(fn, '')
93 fl2 = m2._flags.get(fn, '')
94 diff[fn] = ((None, ''), (n2, fl2))
94 diff[fn] = ((None, ''), (n2, fl2))
95
95
96 return diff
96 return diff
97
97
98 def text(self):
98 def text(self):
99 """Get the full data of this manifest as a bytestring."""
99 """Get the full data of this manifest as a bytestring."""
100 fl = sorted(self)
100 fl = sorted(self)
101 _checkforbidden(fl)
101 _checkforbidden(fl)
102
102
103 hex, flags = revlog.hex, self.flags
103 hex, flags = revlog.hex, self.flags
104 # if this is changed to support newlines in filenames,
104 # if this is changed to support newlines in filenames,
105 # be sure to check the templates/ dir again (especially *-raw.tmpl)
105 # be sure to check the templates/ dir again (especially *-raw.tmpl)
106 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
106 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
107
107
108 def fastdelta(self, base, changes):
108 def fastdelta(self, base, changes):
109 """Given a base manifest text as an array.array and a list of changes
109 """Given a base manifest text as an array.array and a list of changes
110 relative to that text, compute a delta that can be used by revlog.
110 relative to that text, compute a delta that can be used by revlog.
111 """
111 """
112 delta = []
112 delta = []
113 dstart = None
113 dstart = None
114 dend = None
114 dend = None
115 dline = [""]
115 dline = [""]
116 start = 0
116 start = 0
117 # zero copy representation of base as a buffer
117 # zero copy representation of base as a buffer
118 addbuf = util.buffer(base)
118 addbuf = util.buffer(base)
119
119
120 # start with a readonly loop that finds the offset of
120 # start with a readonly loop that finds the offset of
121 # each line and creates the deltas
121 # each line and creates the deltas
122 for f, todelete in changes:
122 for f, todelete in changes:
123 # bs will either be the index of the item or the insert point
123 # bs will either be the index of the item or the insert point
124 start, end = _msearch(addbuf, f, start)
124 start, end = _msearch(addbuf, f, start)
125 if not todelete:
125 if not todelete:
126 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
126 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
127 else:
127 else:
128 if start == end:
128 if start == end:
129 # item we want to delete was not found, error out
129 # item we want to delete was not found, error out
130 raise AssertionError(
130 raise AssertionError(
131 _("failed to remove %s from manifest") % f)
131 _("failed to remove %s from manifest") % f)
132 l = ""
132 l = ""
133 if dstart is not None and dstart <= start and dend >= start:
133 if dstart is not None and dstart <= start and dend >= start:
134 if dend < end:
134 if dend < end:
135 dend = end
135 dend = end
136 if l:
136 if l:
137 dline.append(l)
137 dline.append(l)
138 else:
138 else:
139 if dstart is not None:
139 if dstart is not None:
140 delta.append([dstart, dend, "".join(dline)])
140 delta.append([dstart, dend, "".join(dline)])
141 dstart = start
141 dstart = start
142 dend = end
142 dend = end
143 dline = [l]
143 dline = [l]
144
144
145 if dstart is not None:
145 if dstart is not None:
146 delta.append([dstart, dend, "".join(dline)])
146 delta.append([dstart, dend, "".join(dline)])
147 # apply the delta to the base, and get a delta for addrevision
147 # apply the delta to the base, and get a delta for addrevision
148 deltatext, arraytext = _addlistdelta(base, delta)
148 deltatext, arraytext = _addlistdelta(base, delta)
149 return arraytext, deltatext
149 return arraytext, deltatext
150
150
151 def _msearch(m, s, lo=0, hi=None):
151 def _msearch(m, s, lo=0, hi=None):
152 '''return a tuple (start, end) that says where to find s within m.
152 '''return a tuple (start, end) that says where to find s within m.
153
153
154 If the string is found m[start:end] are the line containing
154 If the string is found m[start:end] are the line containing
155 that string. If start == end the string was not found and
155 that string. If start == end the string was not found and
156 they indicate the proper sorted insertion point.
156 they indicate the proper sorted insertion point.
157
157
158 m should be a buffer or a string
158 m should be a buffer or a string
159 s is a string'''
159 s is a string'''
160 def advance(i, c):
160 def advance(i, c):
161 while i < lenm and m[i] != c:
161 while i < lenm and m[i] != c:
162 i += 1
162 i += 1
163 return i
163 return i
164 if not s:
164 if not s:
165 return (lo, lo)
165 return (lo, lo)
166 lenm = len(m)
166 lenm = len(m)
167 if not hi:
167 if not hi:
168 hi = lenm
168 hi = lenm
169 while lo < hi:
169 while lo < hi:
170 mid = (lo + hi) // 2
170 mid = (lo + hi) // 2
171 start = mid
171 start = mid
172 while start > 0 and m[start - 1] != '\n':
172 while start > 0 and m[start - 1] != '\n':
173 start -= 1
173 start -= 1
174 end = advance(start, '\0')
174 end = advance(start, '\0')
175 if m[start:end] < s:
175 if m[start:end] < s:
176 # we know that after the null there are 40 bytes of sha1
176 # we know that after the null there are 40 bytes of sha1
177 # this translates to the bisect lo = mid + 1
177 # this translates to the bisect lo = mid + 1
178 lo = advance(end + 40, '\n') + 1
178 lo = advance(end + 40, '\n') + 1
179 else:
179 else:
180 # this translates to the bisect hi = mid
180 # this translates to the bisect hi = mid
181 hi = start
181 hi = start
182 end = advance(lo, '\0')
182 end = advance(lo, '\0')
183 found = m[lo:end]
183 found = m[lo:end]
184 if s == found:
184 if s == found:
185 # we know that after the null there are 40 bytes of sha1
185 # we know that after the null there are 40 bytes of sha1
186 end = advance(end + 40, '\n')
186 end = advance(end + 40, '\n')
187 return (lo, end + 1)
187 return (lo, end + 1)
188 else:
188 else:
189 return (lo, lo)
189 return (lo, lo)
190
190
191 def _checkforbidden(l):
191 def _checkforbidden(l):
192 """Check filenames for illegal characters."""
192 """Check filenames for illegal characters."""
193 for f in l:
193 for f in l:
194 if '\n' in f or '\r' in f:
194 if '\n' in f or '\r' in f:
195 raise error.RevlogError(
195 raise error.RevlogError(
196 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
196 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
197
197
198
198
199 # apply the changes collected during the bisect loop to our addlist
199 # apply the changes collected during the bisect loop to our addlist
200 # return a delta suitable for addrevision
200 # return a delta suitable for addrevision
201 def _addlistdelta(addlist, x):
201 def _addlistdelta(addlist, x):
202 # for large addlist arrays, building a new array is cheaper
202 # for large addlist arrays, building a new array is cheaper
203 # than repeatedly modifying the existing one
203 # than repeatedly modifying the existing one
204 currentposition = 0
204 currentposition = 0
205 newaddlist = array.array('c')
205 newaddlist = array.array('c')
206
206
207 for start, end, content in x:
207 for start, end, content in x:
208 newaddlist += addlist[currentposition:start]
208 newaddlist += addlist[currentposition:start]
209 if content:
209 if content:
210 newaddlist += array.array('c', content)
210 newaddlist += array.array('c', content)
211
211
212 currentposition = end
212 currentposition = end
213
213
214 newaddlist += addlist[currentposition:]
214 newaddlist += addlist[currentposition:]
215
215
216 deltatext = "".join(struct.pack(">lll", start, end, len(content))
216 deltatext = "".join(struct.pack(">lll", start, end, len(content))
217 + content for start, end, content in x)
217 + content for start, end, content in x)
218 return deltatext, newaddlist
218 return deltatext, newaddlist
219
219
220 # Pure Python fallback
221 def _parsemanifest(mfdict, fdict, lines):
222 bin = revlog.bin
223 for l in lines.splitlines():
224 f, n = l.split('\0')
225 if len(n) > 40:
226 fdict[f] = n[40:]
227 mfdict[f] = bin(n[:40])
228 else:
229 mfdict[f] = bin(n)
230
220 def _parse(lines):
231 def _parse(lines):
221 mfdict = manifestdict()
232 mfdict = manifestdict()
222 parsers.parse_manifest(mfdict, mfdict._flags, lines)
233 try:
234 parsers.parse_manifest(mfdict, mfdict._flags, lines)
235 except AttributeError:
236 _parsemanifest(mfdict, mfdict._flags, lines)
223 return mfdict
237 return mfdict
224
238
225 class manifest(revlog.revlog):
239 class manifest(revlog.revlog):
226 def __init__(self, opener):
240 def __init__(self, opener):
227 # During normal operations, we expect to deal with not more than four
241 # During normal operations, we expect to deal with not more than four
228 # revs at a time (such as during commit --amend). When rebasing large
242 # revs at a time (such as during commit --amend). When rebasing large
229 # stacks of commits, the number can go up, hence the config knob below.
243 # stacks of commits, the number can go up, hence the config knob below.
230 cachesize = 4
244 cachesize = 4
231 opts = getattr(opener, 'options', None)
245 opts = getattr(opener, 'options', None)
232 if opts is not None:
246 if opts is not None:
233 cachesize = opts.get('manifestcachesize', cachesize)
247 cachesize = opts.get('manifestcachesize', cachesize)
234 self._mancache = util.lrucachedict(cachesize)
248 self._mancache = util.lrucachedict(cachesize)
235 revlog.revlog.__init__(self, opener, "00manifest.i")
249 revlog.revlog.__init__(self, opener, "00manifest.i")
236
250
237 def readdelta(self, node):
251 def readdelta(self, node):
238 r = self.rev(node)
252 r = self.rev(node)
239 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
253 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
240
254
241 def readfast(self, node):
255 def readfast(self, node):
242 '''use the faster of readdelta or read'''
256 '''use the faster of readdelta or read'''
243 r = self.rev(node)
257 r = self.rev(node)
244 deltaparent = self.deltaparent(r)
258 deltaparent = self.deltaparent(r)
245 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
259 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
246 return self.readdelta(node)
260 return self.readdelta(node)
247 return self.read(node)
261 return self.read(node)
248
262
249 def read(self, node):
263 def read(self, node):
250 if node == revlog.nullid:
264 if node == revlog.nullid:
251 return manifestdict() # don't upset local cache
265 return manifestdict() # don't upset local cache
252 if node in self._mancache:
266 if node in self._mancache:
253 return self._mancache[node][0]
267 return self._mancache[node][0]
254 text = self.revision(node)
268 text = self.revision(node)
255 arraytext = array.array('c', text)
269 arraytext = array.array('c', text)
256 m = _parse(text)
270 m = _parse(text)
257 self._mancache[node] = (m, arraytext)
271 self._mancache[node] = (m, arraytext)
258 return m
272 return m
259
273
260 def find(self, node, f):
274 def find(self, node, f):
261 '''look up entry for a single file efficiently.
275 '''look up entry for a single file efficiently.
262 return (node, flags) pair if found, (None, None) if not.'''
276 return (node, flags) pair if found, (None, None) if not.'''
263 if node in self._mancache:
277 if node in self._mancache:
264 m = self._mancache[node][0]
278 m = self._mancache[node][0]
265 return m.get(f), m.flags(f)
279 return m.get(f), m.flags(f)
266 text = self.revision(node)
280 text = self.revision(node)
267 start, end = _msearch(text, f)
281 start, end = _msearch(text, f)
268 if start == end:
282 if start == end:
269 return None, None
283 return None, None
270 l = text[start:end]
284 l = text[start:end]
271 f, n = l.split('\0')
285 f, n = l.split('\0')
272 return revlog.bin(n[:40]), n[40:-1]
286 return revlog.bin(n[:40]), n[40:-1]
273
287
274 def add(self, m, transaction, link, p1, p2, added, removed):
288 def add(self, m, transaction, link, p1, p2, added, removed):
275 if p1 in self._mancache:
289 if p1 in self._mancache:
276 # If our first parent is in the manifest cache, we can
290 # If our first parent is in the manifest cache, we can
277 # compute a delta here using properties we know about the
291 # compute a delta here using properties we know about the
278 # manifest up-front, which may save time later for the
292 # manifest up-front, which may save time later for the
279 # revlog layer.
293 # revlog layer.
280
294
281 _checkforbidden(added)
295 _checkforbidden(added)
282 # combine the changed lists into one list for sorting
296 # combine the changed lists into one list for sorting
283 work = [(x, False) for x in added]
297 work = [(x, False) for x in added]
284 work.extend((x, True) for x in removed)
298 work.extend((x, True) for x in removed)
285 # this could use heapq.merge() (from Python 2.6+) or equivalent
299 # this could use heapq.merge() (from Python 2.6+) or equivalent
286 # since the lists are already sorted
300 # since the lists are already sorted
287 work.sort()
301 work.sort()
288
302
289 arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
303 arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
290 cachedelta = self.rev(p1), deltatext
304 cachedelta = self.rev(p1), deltatext
291 text = util.buffer(arraytext)
305 text = util.buffer(arraytext)
292 else:
306 else:
293 # The first parent manifest isn't already loaded, so we'll
307 # The first parent manifest isn't already loaded, so we'll
294 # just encode a fulltext of the manifest and pass that
308 # just encode a fulltext of the manifest and pass that
295 # through to the revlog layer, and let it handle the delta
309 # through to the revlog layer, and let it handle the delta
296 # process.
310 # process.
297 text = m.text()
311 text = m.text()
298 arraytext = array.array('c', text)
312 arraytext = array.array('c', text)
299 cachedelta = None
313 cachedelta = None
300
314
301 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
315 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
302 self._mancache[n] = (m, arraytext)
316 self._mancache[n] = (m, arraytext)
303
317
304 return n
318 return n
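For orientation between the two hunks: the manifest text that `text()` emits, and that `_msearch` and `_parsemanifest` consume, is one line per tracked file of the form `<path>\0<40-hex-node><flags>\n`, sorted by path. A small illustration with fabricated paths and node values (not Mercurial code):

```python
# Build a tiny manifest text in the format text() produces, then split it
# back apart the way find()/_parsemanifest do.  Paths and nodes are made up.
entries = {
    'a/b.txt': ('aa' * 20, ''),    # regular file, no flags
    'bin/run': ('bb' * 20, 'x'),   # executable flag
}
text = ''.join('%s\0%s%s\n' % (path, node, flags)
               for path, (node, flags) in sorted(entries.items()))

for line in text.splitlines():
    path, rest = line.split('\0')
    node_hex, flags = rest[:40], rest[40:]
    print(path, node_hex[:12], repr(flags))
```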
@@ -1,121 +1,112 @@
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from mercurial.node import bin, nullid
8 from mercurial.node import nullid
9 from mercurial import util
9 from mercurial import util
10 import struct, zlib, cStringIO
10 import struct, zlib, cStringIO
11
11
12 _pack = struct.pack
12 _pack = struct.pack
13 _unpack = struct.unpack
13 _unpack = struct.unpack
14 _compress = zlib.compress
14 _compress = zlib.compress
15 _decompress = zlib.decompress
15 _decompress = zlib.decompress
16 _sha = util.sha1
16 _sha = util.sha1
17
17
18 # Some code below makes tuples directly because it's more convenient. However,
18 # Some code below makes tuples directly because it's more convenient. However,
19 # code outside this module should always use dirstatetuple.
19 # code outside this module should always use dirstatetuple.
20 def dirstatetuple(*x):
20 def dirstatetuple(*x):
21 # x is a tuple
21 # x is a tuple
22 return x
22 return x
23
23
24 def parse_manifest(mfdict, fdict, lines):
25 for l in lines.splitlines():
26 f, n = l.split('\0')
27 if len(n) > 40:
28 fdict[f] = n[40:]
29 mfdict[f] = bin(n[:40])
30 else:
31 mfdict[f] = bin(n)
32
33 def parse_index2(data, inline):
24 def parse_index2(data, inline):
34 def gettype(q):
25 def gettype(q):
35 return int(q & 0xFFFF)
26 return int(q & 0xFFFF)
36
27
37 def offset_type(offset, type):
28 def offset_type(offset, type):
38 return long(long(offset) << 16 | type)
29 return long(long(offset) << 16 | type)
39
30
40 indexformatng = ">Qiiiiii20s12x"
31 indexformatng = ">Qiiiiii20s12x"
41
32
42 s = struct.calcsize(indexformatng)
33 s = struct.calcsize(indexformatng)
43 index = []
34 index = []
44 cache = None
35 cache = None
45 off = 0
36 off = 0
46
37
47 l = len(data) - s
38 l = len(data) - s
48 append = index.append
39 append = index.append
49 if inline:
40 if inline:
50 cache = (0, data)
41 cache = (0, data)
51 while off <= l:
42 while off <= l:
52 e = _unpack(indexformatng, data[off:off + s])
43 e = _unpack(indexformatng, data[off:off + s])
53 append(e)
44 append(e)
54 if e[1] < 0:
45 if e[1] < 0:
55 break
46 break
56 off += e[1] + s
47 off += e[1] + s
57 else:
48 else:
58 while off <= l:
49 while off <= l:
59 e = _unpack(indexformatng, data[off:off + s])
50 e = _unpack(indexformatng, data[off:off + s])
60 append(e)
51 append(e)
61 off += s
52 off += s
62
53
63 if off != len(data):
54 if off != len(data):
64 raise ValueError('corrupt index file')
55 raise ValueError('corrupt index file')
65
56
66 if index:
57 if index:
67 e = list(index[0])
58 e = list(index[0])
68 type = gettype(e[0])
59 type = gettype(e[0])
69 e[0] = offset_type(0, type)
60 e[0] = offset_type(0, type)
70 index[0] = tuple(e)
61 index[0] = tuple(e)
71
62
72 # add the magic null revision at -1
63 # add the magic null revision at -1
73 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
64 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
74
65
75 return index, cache
66 return index, cache
76
67
77 def parse_dirstate(dmap, copymap, st):
68 def parse_dirstate(dmap, copymap, st):
78 parents = [st[:20], st[20: 40]]
69 parents = [st[:20], st[20: 40]]
79 # dereference fields so they will be local in loop
70 # dereference fields so they will be local in loop
80 format = ">cllll"
71 format = ">cllll"
81 e_size = struct.calcsize(format)
72 e_size = struct.calcsize(format)
82 pos1 = 40
73 pos1 = 40
83 l = len(st)
74 l = len(st)
84
75
85 # the inner loop
76 # the inner loop
86 while pos1 < l:
77 while pos1 < l:
87 pos2 = pos1 + e_size
78 pos2 = pos1 + e_size
88 e = _unpack(">cllll", st[pos1:pos2]) # a literal here is faster
79 e = _unpack(">cllll", st[pos1:pos2]) # a literal here is faster
89 pos1 = pos2 + e[4]
80 pos1 = pos2 + e[4]
90 f = st[pos2:pos1]
81 f = st[pos2:pos1]
91 if '\0' in f:
82 if '\0' in f:
92 f, c = f.split('\0')
83 f, c = f.split('\0')
93 copymap[f] = c
84 copymap[f] = c
94 dmap[f] = e[:4]
85 dmap[f] = e[:4]
95 return parents
86 return parents
96
87
97 def pack_dirstate(dmap, copymap, pl, now):
88 def pack_dirstate(dmap, copymap, pl, now):
98 now = int(now)
89 now = int(now)
99 cs = cStringIO.StringIO()
90 cs = cStringIO.StringIO()
100 write = cs.write
91 write = cs.write
101 write("".join(pl))
92 write("".join(pl))
102 for f, e in dmap.iteritems():
93 for f, e in dmap.iteritems():
103 if e[0] == 'n' and e[3] == now:
94 if e[0] == 'n' and e[3] == now:
104 # The file was last modified "simultaneously" with the current
95 # The file was last modified "simultaneously" with the current
105 # write to dirstate (i.e. within the same second for file-
96 # write to dirstate (i.e. within the same second for file-
106 # systems with a granularity of 1 sec). This commonly happens
97 # systems with a granularity of 1 sec). This commonly happens
107 # for at least a couple of files on 'update'.
98 # for at least a couple of files on 'update'.
108 # The user could change the file without changing its size
99 # The user could change the file without changing its size
109 # within the same second. Invalidate the file's mtime in
100 # within the same second. Invalidate the file's mtime in
110 # dirstate, forcing future 'status' calls to compare the
101 # dirstate, forcing future 'status' calls to compare the
111 # contents of the file if the size is the same. This prevents
102 # contents of the file if the size is the same. This prevents
112 # mistakenly treating such files as clean.
103 # mistakenly treating such files as clean.
113 e = dirstatetuple(e[0], e[1], e[2], -1)
104 e = dirstatetuple(e[0], e[1], e[2], -1)
114 dmap[f] = e
105 dmap[f] = e
115
106
116 if f in copymap:
107 if f in copymap:
117 f = "%s\0%s" % (f, copymap[f])
108 f = "%s\0%s" % (f, copymap[f])
118 e = _pack(">cllll", e[0], e[1], e[2], e[3], len(f))
109 e = _pack(">cllll", e[0], e[1], e[2], e[3], len(f))
119 write(e)
110 write(e)
120 write(f)
111 write(f)
121 return cs.getvalue()
112 return cs.getvalue()
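The dirstate half of parsers.py works on a similarly simple layout: two 20-byte parent hashes, then for each file a `>cllll` record (state, mode, size, mtime, length of the name field) followed by the name, with an optional `\0<copy source>` suffix. A hedged round-trip of a single record, with all values fabricated:

```python
# Pack and unpack one dirstate entry the way pack_dirstate()/parse_dirstate()
# do; the state, mode, size, mtime and filename below are invented.
import struct

state, mode, size, mtime = b'n', 0o100644, 12, 1424000000
name = b'src/app.py'

record = struct.pack('>cllll', state, mode, size, mtime, len(name)) + name

e_size = struct.calcsize('>cllll')
st, md, sz, mt, namelen = struct.unpack('>cllll', record[:e_size])
fname = record[e_size:e_size + namelen]
assert (st, md, sz, mt, fname) == (state, mode, size, mtime, name)
```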