Show More
@@ -1,1247 +1,1217 | |||||
1 | /* |
|
1 | /* | |
2 | parsers.c - efficient content parsing |
|
2 | parsers.c - efficient content parsing | |
3 |
|
3 | |||
4 | Copyright 2008 Olivia Mackall <olivia@selenic.com> and others |
|
4 | Copyright 2008 Olivia Mackall <olivia@selenic.com> and others | |
5 |
|
5 | |||
6 | This software may be used and distributed according to the terms of |
|
6 | This software may be used and distributed according to the terms of | |
7 | the GNU General Public License, incorporated herein by reference. |
|
7 | the GNU General Public License, incorporated herein by reference. | |
8 | */ |
|
8 | */ | |
9 |
|
9 | |||
10 | #define PY_SSIZE_T_CLEAN |
|
10 | #define PY_SSIZE_T_CLEAN | |
11 | #include <Python.h> |
|
11 | #include <Python.h> | |
12 | #include <ctype.h> |
|
12 | #include <ctype.h> | |
13 | #include <stddef.h> |
|
13 | #include <stddef.h> | |
14 | #include <string.h> |
|
14 | #include <string.h> | |
15 |
|
15 | |||
16 | #include "bitmanipulation.h" |
|
16 | #include "bitmanipulation.h" | |
17 | #include "charencode.h" |
|
17 | #include "charencode.h" | |
18 | #include "util.h" |
|
18 | #include "util.h" | |
19 |
|
19 | |||
20 | #ifdef IS_PY3K |
|
20 | #ifdef IS_PY3K | |
21 | /* The mapping of Python types is meant to be temporary to get Python |
|
21 | /* The mapping of Python types is meant to be temporary to get Python | |
22 | * 3 to compile. We should remove this once Python 3 support is fully |
|
22 | * 3 to compile. We should remove this once Python 3 support is fully | |
23 | * supported and proper types are used in the extensions themselves. */ |
|
23 | * supported and proper types are used in the extensions themselves. */ | |
24 | #define PyInt_Check PyLong_Check |
|
24 | #define PyInt_Check PyLong_Check | |
25 | #define PyInt_FromLong PyLong_FromLong |
|
25 | #define PyInt_FromLong PyLong_FromLong | |
26 | #define PyInt_FromSsize_t PyLong_FromSsize_t |
|
26 | #define PyInt_FromSsize_t PyLong_FromSsize_t | |
27 | #define PyInt_AsLong PyLong_AsLong |
|
27 | #define PyInt_AsLong PyLong_AsLong | |
28 | #endif |
|
28 | #endif | |
29 |
|
29 | |||
30 | static const char *const versionerrortext = "Python minor version mismatch"; |
|
30 | static const char *const versionerrortext = "Python minor version mismatch"; | |
31 |
|
31 | |||
32 | static const int dirstate_v1_from_p2 = -2; |
|
32 | static const int dirstate_v1_from_p2 = -2; | |
33 | static const int dirstate_v1_nonnormal = -1; |
|
33 | static const int dirstate_v1_nonnormal = -1; | |
34 | static const int ambiguous_time = -1; |
|
34 | static const int ambiguous_time = -1; | |
35 |
|
35 | |||
36 | static PyObject *dict_new_presized(PyObject *self, PyObject *args) |
|
36 | static PyObject *dict_new_presized(PyObject *self, PyObject *args) | |
37 | { |
|
37 | { | |
38 | Py_ssize_t expected_size; |
|
38 | Py_ssize_t expected_size; | |
39 |
|
39 | |||
40 | if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) { |
|
40 | if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) { | |
41 | return NULL; |
|
41 | return NULL; | |
42 | } |
|
42 | } | |
43 |
|
43 | |||
44 | return _dict_new_presized(expected_size); |
|
44 | return _dict_new_presized(expected_size); | |
45 | } |
|
45 | } | |
46 |
|
46 | |||
47 | static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args, |
|
47 | static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args, | |
48 | PyObject *kwds) |
|
48 | PyObject *kwds) | |
49 | { |
|
49 | { | |
50 | /* We do all the initialization here and not a tp_init function because |
|
50 | /* We do all the initialization here and not a tp_init function because | |
51 | * dirstate_item is immutable. */ |
|
51 | * dirstate_item is immutable. */ | |
52 | dirstateItemObject *t; |
|
52 | dirstateItemObject *t; | |
53 | int wc_tracked; |
|
53 | int wc_tracked; | |
54 | int p1_tracked; |
|
54 | int p1_tracked; | |
55 |
int p2_ |
|
55 | int p2_info; | |
56 | int merged; |
|
56 | int has_meaningful_data; | |
57 | int clean_p1; |
|
57 | int has_meaningful_mtime; | |
58 | int clean_p2; |
|
58 | int mode; | |
59 | int possibly_dirty; |
|
59 | int size; | |
|
60 | int mtime; | |||
60 | PyObject *parentfiledata; |
|
61 | PyObject *parentfiledata; | |
61 | static char *keywords_name[] = { |
|
62 | static char *keywords_name[] = { | |
62 | "wc_tracked", "p1_tracked", "p2_tracked", |
|
63 | "wc_tracked", | |
63 | "merged", "clean_p1", "clean_p2", |
|
64 | "p1_tracked", | |
64 | "possibly_dirty", "parentfiledata", NULL, |
|
65 | "p2_info", | |
|
66 | "has_meaningful_data", | |||
|
67 | "has_meaningful_mtime", | |||
|
68 | "parentfiledata", | |||
|
69 | NULL, | |||
65 | }; |
|
70 | }; | |
66 | wc_tracked = 0; |
|
71 | wc_tracked = 0; | |
67 | p1_tracked = 0; |
|
72 | p1_tracked = 0; | |
68 |
p2_ |
|
73 | p2_info = 0; | |
69 | merged = 0; |
|
74 | has_meaningful_mtime = 1; | |
70 | clean_p1 = 0; |
|
75 | has_meaningful_data = 1; | |
71 | clean_p2 = 0; |
|
|||
72 | possibly_dirty = 0; |
|
|||
73 | parentfiledata = Py_None; |
|
76 | parentfiledata = Py_None; | |
74 |
if (!PyArg_ParseTupleAndKeywords( |
|
77 | if (!PyArg_ParseTupleAndKeywords( | |
75 | &wc_tracked, &p1_tracked, &p2_tracked, |
|
78 | args, kwds, "|iiiiiO", keywords_name, &wc_tracked, &p1_tracked, | |
76 | &merged, &clean_p1, &clean_p2, |
|
79 | &p2_info, &has_meaningful_data, &has_meaningful_mtime, | |
77 | &possibly_dirty, &parentfiledata |
|
80 | &parentfiledata)) { | |
78 |
|
||||
79 | )) { |
|
|||
80 | return NULL; |
|
|||
81 | } |
|
|||
82 | if (merged && (clean_p1 || clean_p2)) { |
|
|||
83 | PyErr_SetString(PyExc_RuntimeError, |
|
|||
84 | "`merged` argument incompatible with " |
|
|||
85 | "`clean_p1`/`clean_p2`"); |
|
|||
86 | return NULL; |
|
81 | return NULL; | |
87 | } |
|
82 | } | |
88 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
83 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); | |
89 | if (!t) { |
|
84 | if (!t) { | |
90 | return NULL; |
|
85 | return NULL; | |
91 | } |
|
86 | } | |
92 |
|
87 | |||
93 | t->flags = 0; |
|
88 | t->flags = 0; | |
94 | if (wc_tracked) { |
|
89 | if (wc_tracked) { | |
95 | t->flags |= dirstate_flag_wc_tracked; |
|
90 | t->flags |= dirstate_flag_wc_tracked; | |
96 | } |
|
91 | } | |
97 | if (p1_tracked) { |
|
92 | if (p1_tracked) { | |
98 | t->flags |= dirstate_flag_p1_tracked; |
|
93 | t->flags |= dirstate_flag_p1_tracked; | |
99 | } |
|
94 | } | |
100 |
if (p2_ |
|
95 | if (p2_info) { | |
101 |
t->flags |= dirstate_flag_p2_ |
|
96 | t->flags |= dirstate_flag_p2_info; | |
102 | } |
|
|||
103 | if (possibly_dirty) { |
|
|||
104 | t->flags |= dirstate_flag_possibly_dirty; |
|
|||
105 | } |
|
|||
106 | if (merged) { |
|
|||
107 | t->flags |= dirstate_flag_merged; |
|
|||
108 | } |
|
97 | } | |
109 | if (clean_p1) { |
|
98 | ||
110 | t->flags |= dirstate_flag_clean_p1; |
|
|||
111 | } |
|
|||
112 | if (clean_p2) { |
|
|||
113 | t->flags |= dirstate_flag_clean_p2; |
|
|||
114 | } |
|
|||
115 | t->mode = 0; |
|
|||
116 | t->size = dirstate_v1_nonnormal; |
|
|||
117 | t->mtime = ambiguous_time; |
|
|||
118 | if (parentfiledata != Py_None) { |
|
99 | if (parentfiledata != Py_None) { | |
119 | if (!PyTuple_CheckExact(parentfiledata)) { |
|
100 | if (!PyTuple_CheckExact(parentfiledata)) { | |
120 | PyErr_SetString( |
|
101 | PyErr_SetString( | |
121 | PyExc_TypeError, |
|
102 | PyExc_TypeError, | |
122 | "parentfiledata should be a Tuple or None"); |
|
103 | "parentfiledata should be a Tuple or None"); | |
123 | return NULL; |
|
104 | return NULL; | |
124 | } |
|
105 | } | |
125 | t->mode = |
|
106 | mode = (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 0)); | |
126 |
|
|
107 | size = (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 1)); | |
127 | t->size = |
|
108 | mtime = (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 2)); | |
128 | (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 1)); |
|
109 | } else { | |
129 | t->mtime = |
|
110 | has_meaningful_data = 0; | |
130 | (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 2)); |
|
111 | has_meaningful_mtime = 0; | |
|
112 | } | |||
|
113 | if (has_meaningful_data) { | |||
|
114 | t->flags |= dirstate_flag_has_meaningful_data; | |||
|
115 | t->mode = mode; | |||
|
116 | t->size = size; | |||
|
117 | } else { | |||
|
118 | t->mode = 0; | |||
|
119 | t->size = 0; | |||
|
120 | } | |||
|
121 | if (has_meaningful_mtime) { | |||
|
122 | t->flags |= dirstate_flag_has_meaningful_mtime; | |||
|
123 | t->mtime = mtime; | |||
|
124 | } else { | |||
|
125 | t->mtime = 0; | |||
131 | } |
|
126 | } | |
132 | return (PyObject *)t; |
|
127 | return (PyObject *)t; | |
133 | } |
|
128 | } | |
134 |
|
129 | |||
135 | static void dirstate_item_dealloc(PyObject *o) |
|
130 | static void dirstate_item_dealloc(PyObject *o) | |
136 | { |
|
131 | { | |
137 | PyObject_Del(o); |
|
132 | PyObject_Del(o); | |
138 | } |
|
133 | } | |
139 |
|
134 | |||
140 | static inline bool dirstate_item_c_tracked(dirstateItemObject *self) |
|
135 | static inline bool dirstate_item_c_tracked(dirstateItemObject *self) | |
141 | { |
|
136 | { | |
142 | return (self->flags & dirstate_flag_wc_tracked); |
|
137 | return (self->flags & dirstate_flag_wc_tracked); | |
143 | } |
|
138 | } | |
144 |
|
139 | |||
|
140 | static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self) | |||
|
141 | { | |||
|
142 | const unsigned char mask = dirstate_flag_wc_tracked | | |||
|
143 | dirstate_flag_p1_tracked | | |||
|
144 | dirstate_flag_p2_info; | |||
|
145 | return (self->flags & mask); | |||
|
146 | } | |||
|
147 | ||||
145 | static inline bool dirstate_item_c_added(dirstateItemObject *self) |
|
148 | static inline bool dirstate_item_c_added(dirstateItemObject *self) | |
146 | { |
|
149 | { | |
147 | unsigned char mask = |
|
150 | const unsigned char mask = | |
148 | (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | |
|
151 | (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | | |
149 |
dirstate_flag_p2_ |
|
152 | dirstate_flag_p2_info); | |
150 | unsigned char target = dirstate_flag_wc_tracked; |
|
153 | const unsigned char target = dirstate_flag_wc_tracked; | |
151 | return (self->flags & mask) == target; |
|
154 | return (self->flags & mask) == target; | |
152 | } |
|
155 | } | |
153 |
|
156 | |||
154 | static inline bool dirstate_item_c_removed(dirstateItemObject *self) |
|
157 | static inline bool dirstate_item_c_removed(dirstateItemObject *self) | |
155 | { |
|
158 | { | |
156 | if (self->flags & dirstate_flag_wc_tracked) { |
|
159 | if (self->flags & dirstate_flag_wc_tracked) { | |
157 | return false; |
|
160 | return false; | |
158 | } |
|
161 | } | |
159 | return (self->flags & |
|
162 | return (self->flags & | |
160 |
(dirstate_flag_p1_tracked | dirstate_flag_p2_ |
|
163 | (dirstate_flag_p1_tracked | dirstate_flag_p2_info)); | |
161 | } |
|
164 | } | |
162 |
|
165 | |||
163 | static inline bool dirstate_item_c_merged(dirstateItemObject *self) |
|
166 | static inline bool dirstate_item_c_merged(dirstateItemObject *self) | |
164 | { |
|
167 | { | |
165 | return ((self->flags & dirstate_flag_wc_tracked) && |
|
168 | return ((self->flags & dirstate_flag_wc_tracked) && | |
166 |
(self->flags & dirstate_flag_ |
|
169 | (self->flags & dirstate_flag_p1_tracked) && | |
|
170 | (self->flags & dirstate_flag_p2_info)); | |||
167 | } |
|
171 | } | |
168 |
|
172 | |||
169 | static inline bool dirstate_item_c_from_p2(dirstateItemObject *self) |
|
173 | static inline bool dirstate_item_c_from_p2(dirstateItemObject *self) | |
170 | { |
|
174 | { | |
171 | if (!dirstate_item_c_tracked(self)) { |
|
175 | return ((self->flags & dirstate_flag_wc_tracked) && | |
172 | return false; |
|
176 | !(self->flags & dirstate_flag_p1_tracked) && | |
173 | } |
|
177 | (self->flags & dirstate_flag_p2_info)); | |
174 | return (self->flags & dirstate_flag_clean_p2); |
|
|||
175 | } |
|
178 | } | |
176 |
|
179 | |||
177 | static inline char dirstate_item_c_v1_state(dirstateItemObject *self) |
|
180 | static inline char dirstate_item_c_v1_state(dirstateItemObject *self) | |
178 | { |
|
181 | { | |
179 | if (dirstate_item_c_removed(self)) { |
|
182 | if (dirstate_item_c_removed(self)) { | |
180 | return 'r'; |
|
183 | return 'r'; | |
181 | } else if (dirstate_item_c_merged(self)) { |
|
184 | } else if (dirstate_item_c_merged(self)) { | |
182 | return 'm'; |
|
185 | return 'm'; | |
183 | } else if (dirstate_item_c_added(self)) { |
|
186 | } else if (dirstate_item_c_added(self)) { | |
184 | return 'a'; |
|
187 | return 'a'; | |
185 | } else { |
|
188 | } else { | |
186 | return 'n'; |
|
189 | return 'n'; | |
187 | } |
|
190 | } | |
188 | } |
|
191 | } | |
189 |
|
192 | |||
190 | static inline int dirstate_item_c_v1_mode(dirstateItemObject *self) |
|
193 | static inline int dirstate_item_c_v1_mode(dirstateItemObject *self) | |
191 | { |
|
194 | { | |
|
195 | if (self->flags & dirstate_flag_has_meaningful_data) { | |||
192 | return self->mode; |
|
196 | return self->mode; | |
|
197 | } else { | |||
|
198 | return 0; | |||
|
199 | } | |||
193 | } |
|
200 | } | |
194 |
|
201 | |||
195 | static inline int dirstate_item_c_v1_size(dirstateItemObject *self) |
|
202 | static inline int dirstate_item_c_v1_size(dirstateItemObject *self) | |
196 | { |
|
203 | { | |
197 | if (dirstate_item_c_removed(self) && |
|
204 | if (!(self->flags & dirstate_flag_wc_tracked) && | |
198 |
(self->flags & dirstate_flag_ |
|
205 | (self->flags & dirstate_flag_p2_info)) { | |
|
206 | if (self->flags & dirstate_flag_p1_tracked) { | |||
199 | return dirstate_v1_nonnormal; |
|
207 | return dirstate_v1_nonnormal; | |
200 | } else if (dirstate_item_c_removed(self) && |
|
208 | } else { | |
201 | (self->flags & dirstate_flag_clean_p2)) { |
|
|||
202 | return dirstate_v1_from_p2; |
|
209 | return dirstate_v1_from_p2; | |
|
210 | } | |||
203 | } else if (dirstate_item_c_removed(self)) { |
|
211 | } else if (dirstate_item_c_removed(self)) { | |
204 | return 0; |
|
212 | return 0; | |
205 | } else if (dirstate_item_c_merged(self)) { |
|
213 | } else if (self->flags & dirstate_flag_p2_info) { | |
206 | return dirstate_v1_from_p2; |
|
214 | return dirstate_v1_from_p2; | |
207 | } else if (dirstate_item_c_added(self)) { |
|
215 | } else if (dirstate_item_c_added(self)) { | |
208 | return dirstate_v1_nonnormal; |
|
216 | return dirstate_v1_nonnormal; | |
209 | } else if (dirstate_item_c_from_p2(self)) { |
|
217 | } else if (self->flags & dirstate_flag_has_meaningful_data) { | |
210 | return dirstate_v1_from_p2; |
|
218 | return self->size; | |
211 | } else if (self->flags & dirstate_flag_possibly_dirty) { |
|
|||
212 | return self->size; /* NON NORMAL ? */ |
|
|||
213 | } else { |
|
219 | } else { | |
214 | return self->size; |
|
220 | return dirstate_v1_nonnormal; | |
215 | } |
|
221 | } | |
216 | } |
|
222 | } | |
217 |
|
223 | |||
218 | static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self) |
|
224 | static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self) | |
219 | { |
|
225 | { | |
220 | if (dirstate_item_c_removed(self)) { |
|
226 | if (dirstate_item_c_removed(self)) { | |
221 | return 0; |
|
227 | return 0; | |
222 |
} else if (self->flags & dirstate_flag_ |
|
228 | } else if (!(self->flags & dirstate_flag_has_meaningful_mtime) || | |
223 | return ambiguous_time; |
|
229 | !(self->flags & dirstate_flag_p1_tracked) || | |
224 | } else if (dirstate_item_c_merged(self)) { |
|
230 | !(self->flags & dirstate_flag_wc_tracked) || | |
225 | return ambiguous_time; |
|
231 | (self->flags & dirstate_flag_p2_info)) { | |
226 | } else if (dirstate_item_c_added(self)) { |
|
|||
227 | return ambiguous_time; |
|
|||
228 | } else if (dirstate_item_c_from_p2(self)) { |
|
|||
229 | return ambiguous_time; |
|
232 | return ambiguous_time; | |
230 | } else { |
|
233 | } else { | |
231 | return self->mtime; |
|
234 | return self->mtime; | |
232 | } |
|
235 | } | |
233 | } |
|
236 | } | |
234 |
|
237 | |||
235 | static PyObject *dirstate_item_v1_state(dirstateItemObject *self) |
|
238 | static PyObject *dirstate_item_v1_state(dirstateItemObject *self) | |
236 | { |
|
239 | { | |
237 | char state = dirstate_item_c_v1_state(self); |
|
240 | char state = dirstate_item_c_v1_state(self); | |
238 | return PyBytes_FromStringAndSize(&state, 1); |
|
241 | return PyBytes_FromStringAndSize(&state, 1); | |
239 | }; |
|
242 | }; | |
240 |
|
243 | |||
241 | static PyObject *dirstate_item_v1_mode(dirstateItemObject *self) |
|
244 | static PyObject *dirstate_item_v1_mode(dirstateItemObject *self) | |
242 | { |
|
245 | { | |
243 | return PyInt_FromLong(dirstate_item_c_v1_mode(self)); |
|
246 | return PyInt_FromLong(dirstate_item_c_v1_mode(self)); | |
244 | }; |
|
247 | }; | |
245 |
|
248 | |||
246 | static PyObject *dirstate_item_v1_size(dirstateItemObject *self) |
|
249 | static PyObject *dirstate_item_v1_size(dirstateItemObject *self) | |
247 | { |
|
250 | { | |
248 | return PyInt_FromLong(dirstate_item_c_v1_size(self)); |
|
251 | return PyInt_FromLong(dirstate_item_c_v1_size(self)); | |
249 | }; |
|
252 | }; | |
250 |
|
253 | |||
251 | static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self) |
|
254 | static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self) | |
252 | { |
|
255 | { | |
253 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); |
|
256 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); | |
254 | }; |
|
257 | }; | |
255 |
|
258 | |||
256 | static PyObject *dirstate_item_need_delay(dirstateItemObject *self, |
|
259 | static PyObject *dirstate_item_need_delay(dirstateItemObject *self, | |
257 | PyObject *value) |
|
260 | PyObject *value) | |
258 | { |
|
261 | { | |
259 | long now; |
|
262 | long now; | |
260 | if (!pylong_to_long(value, &now)) { |
|
263 | if (!pylong_to_long(value, &now)) { | |
261 | return NULL; |
|
264 | return NULL; | |
262 | } |
|
265 | } | |
263 | if (dirstate_item_c_v1_state(self) == 'n' && |
|
266 | if (dirstate_item_c_v1_state(self) == 'n' && | |
264 | dirstate_item_c_v1_mtime(self) == now) { |
|
267 | dirstate_item_c_v1_mtime(self) == now) { | |
265 | Py_RETURN_TRUE; |
|
268 | Py_RETURN_TRUE; | |
266 | } else { |
|
269 | } else { | |
267 | Py_RETURN_FALSE; |
|
270 | Py_RETURN_FALSE; | |
268 | } |
|
271 | } | |
269 | }; |
|
272 | }; | |
270 |
|
273 | |||
271 | /* This will never change since it's bound to V1 |
|
274 | /* This will never change since it's bound to V1 | |
272 | */ |
|
275 | */ | |
273 | static inline dirstateItemObject * |
|
276 | static inline dirstateItemObject * | |
274 | dirstate_item_from_v1_data(char state, int mode, int size, int mtime) |
|
277 | dirstate_item_from_v1_data(char state, int mode, int size, int mtime) | |
275 | { |
|
278 | { | |
276 | dirstateItemObject *t = |
|
279 | dirstateItemObject *t = | |
277 | PyObject_New(dirstateItemObject, &dirstateItemType); |
|
280 | PyObject_New(dirstateItemObject, &dirstateItemType); | |
278 | if (!t) { |
|
281 | if (!t) { | |
279 | return NULL; |
|
282 | return NULL; | |
280 | } |
|
283 | } | |
281 |
|
284 | t->flags = 0; | ||
282 | if (state == 'm') { |
|
|||
283 | t->flags = |
|
|||
284 | (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | |
|
|||
285 | dirstate_flag_p2_tracked | dirstate_flag_merged); |
|
|||
286 | t->mode = 0; |
|
|||
287 | t->size = dirstate_v1_from_p2; |
|
|||
288 | t->mtime = ambiguous_time; |
|
|||
289 | } else if (state == 'a') { |
|
|||
290 | t->flags = dirstate_flag_wc_tracked; |
|
|||
291 | t->mode = 0; |
|
|||
292 | t->size = dirstate_v1_nonnormal; |
|
|||
293 | t->mtime = ambiguous_time; |
|
|||
294 | } else if (state == 'r') { |
|
|||
295 |
|
|
285 | t->mode = 0; | |
296 |
|
|
286 | t->size = 0; | |
297 |
|
|
287 | t->mtime = 0; | |
|
288 | ||||
|
289 | if (state == 'm') { | |||
|
290 | t->flags = (dirstate_flag_wc_tracked | | |||
|
291 | dirstate_flag_p1_tracked | dirstate_flag_p2_info); | |||
|
292 | } else if (state == 'a') { | |||
|
293 | t->flags = dirstate_flag_wc_tracked; | |||
|
294 | } else if (state == 'r') { | |||
298 | if (size == dirstate_v1_nonnormal) { |
|
295 | if (size == dirstate_v1_nonnormal) { | |
299 | t->flags = |
|
296 | t->flags = | |
300 |
|
|
297 | dirstate_flag_p1_tracked | dirstate_flag_p2_info; | |
301 | dirstate_flag_p2_tracked | dirstate_flag_merged); |
|
|||
302 | } else if (size == dirstate_v1_from_p2) { |
|
298 | } else if (size == dirstate_v1_from_p2) { | |
303 | t->flags = |
|
299 | t->flags = dirstate_flag_p2_info; | |
304 | (dirstate_flag_p2_tracked | dirstate_flag_clean_p2); |
|
|||
305 | } else { |
|
300 | } else { | |
306 | t->flags = dirstate_flag_p1_tracked; |
|
301 | t->flags = dirstate_flag_p1_tracked; | |
307 | } |
|
302 | } | |
308 | } else if (state == 'n') { |
|
303 | } else if (state == 'n') { | |
309 | if (size == dirstate_v1_from_p2) { |
|
304 | if (size == dirstate_v1_from_p2) { | |
310 | t->flags = |
|
305 | t->flags = | |
311 |
|
|
306 | dirstate_flag_wc_tracked | dirstate_flag_p2_info; | |
312 | dirstate_flag_p2_tracked | dirstate_flag_clean_p2); |
|
|||
313 | t->mode = 0; |
|
|||
314 | t->size = dirstate_v1_from_p2; |
|
|||
315 | t->mtime = ambiguous_time; |
|
|||
316 | } else if (size == dirstate_v1_nonnormal) { |
|
307 | } else if (size == dirstate_v1_nonnormal) { | |
317 | t->flags = (dirstate_flag_wc_tracked | |
|
308 | t->flags = | |
318 |
|
|
309 | dirstate_flag_wc_tracked | dirstate_flag_p1_tracked; | |
319 | dirstate_flag_possibly_dirty); |
|
|||
320 | t->mode = 0; |
|
|||
321 | t->size = dirstate_v1_nonnormal; |
|
|||
322 | t->mtime = ambiguous_time; |
|
|||
323 | } else if (mtime == ambiguous_time) { |
|
310 | } else if (mtime == ambiguous_time) { | |
324 | t->flags = (dirstate_flag_wc_tracked | |
|
311 | t->flags = (dirstate_flag_wc_tracked | | |
325 | dirstate_flag_p1_tracked | |
|
312 | dirstate_flag_p1_tracked | | |
326 |
dirstate_flag_ |
|
313 | dirstate_flag_has_meaningful_data); | |
327 | t->mode = mode; |
|
314 | t->mode = mode; | |
328 | t->size = size; |
|
315 | t->size = size; | |
329 | t->mtime = 0; |
|
|||
330 | } else { |
|
316 | } else { | |
331 | t->flags = (dirstate_flag_wc_tracked | |
|
317 | t->flags = (dirstate_flag_wc_tracked | | |
332 |
dirstate_flag_p1_tracked |
|
318 | dirstate_flag_p1_tracked | | |
|
319 | dirstate_flag_has_meaningful_data | | |||
|
320 | dirstate_flag_has_meaningful_mtime); | |||
333 | t->mode = mode; |
|
321 | t->mode = mode; | |
334 | t->size = size; |
|
322 | t->size = size; | |
335 | t->mtime = mtime; |
|
323 | t->mtime = mtime; | |
336 | } |
|
324 | } | |
337 | } else { |
|
325 | } else { | |
338 | PyErr_Format(PyExc_RuntimeError, |
|
326 | PyErr_Format(PyExc_RuntimeError, | |
339 | "unknown state: `%c` (%d, %d, %d)", state, mode, |
|
327 | "unknown state: `%c` (%d, %d, %d)", state, mode, | |
340 | size, mtime, NULL); |
|
328 | size, mtime, NULL); | |
341 | Py_DECREF(t); |
|
329 | Py_DECREF(t); | |
342 | return NULL; |
|
330 | return NULL; | |
343 | } |
|
331 | } | |
344 |
|
332 | |||
345 | return t; |
|
333 | return t; | |
346 | } |
|
334 | } | |
347 |
|
335 | |||
348 | /* This will never change since it's bound to V1, unlike `dirstate_item_new` */ |
|
336 | /* This will never change since it's bound to V1, unlike `dirstate_item_new` */ | |
349 | static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype, |
|
337 | static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype, | |
350 | PyObject *args) |
|
338 | PyObject *args) | |
351 | { |
|
339 | { | |
352 | /* We do all the initialization here and not a tp_init function because |
|
340 | /* We do all the initialization here and not a tp_init function because | |
353 | * dirstate_item is immutable. */ |
|
341 | * dirstate_item is immutable. */ | |
354 | char state; |
|
342 | char state; | |
355 | int size, mode, mtime; |
|
343 | int size, mode, mtime; | |
356 | if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { |
|
344 | if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { | |
357 | return NULL; |
|
345 | return NULL; | |
358 | } |
|
346 | } | |
359 | return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime); |
|
347 | return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime); | |
360 | }; |
|
348 | }; | |
361 |
|
349 | |||
362 | /* constructor to help legacy API to build a new "added" item |
|
350 | /* constructor to help legacy API to build a new "added" item | |
363 |
|
351 | |||
364 | Should eventually be removed */ |
|
352 | Should eventually be removed */ | |
365 | static PyObject *dirstate_item_new_added(PyTypeObject *subtype) |
|
353 | static PyObject *dirstate_item_new_added(PyTypeObject *subtype) | |
366 | { |
|
354 | { | |
367 | dirstateItemObject *t; |
|
355 | dirstateItemObject *t; | |
368 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
356 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); | |
369 | if (!t) { |
|
357 | if (!t) { | |
370 | return NULL; |
|
358 | return NULL; | |
371 | } |
|
359 | } | |
372 | t->flags = dirstate_flag_wc_tracked; |
|
360 | t->flags = dirstate_flag_wc_tracked; | |
373 | t->mode = 0; |
|
361 | t->mode = 0; | |
374 | t->size = dirstate_v1_nonnormal; |
|
362 | t->size = 0; | |
375 |
t->mtime = |
|
363 | t->mtime = 0; | |
376 | return (PyObject *)t; |
|
364 | return (PyObject *)t; | |
377 | }; |
|
365 | }; | |
378 |
|
366 | |||
379 | /* constructor to help legacy API to build a new "merged" item |
|
367 | /* constructor to help legacy API to build a new "merged" item | |
380 |
|
368 | |||
381 | Should eventually be removed */ |
|
369 | Should eventually be removed */ | |
382 | static PyObject *dirstate_item_new_merged(PyTypeObject *subtype) |
|
370 | static PyObject *dirstate_item_new_merged(PyTypeObject *subtype) | |
383 | { |
|
371 | { | |
384 | dirstateItemObject *t; |
|
372 | dirstateItemObject *t; | |
385 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
373 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); | |
386 | if (!t) { |
|
374 | if (!t) { | |
387 | return NULL; |
|
375 | return NULL; | |
388 | } |
|
376 | } | |
389 | t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | |
|
377 | t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | | |
390 |
dirstate_flag_p2_ |
|
378 | dirstate_flag_p2_info); | |
391 | t->mode = 0; |
|
379 | t->mode = 0; | |
392 | t->size = dirstate_v1_from_p2; |
|
380 | t->size = 0; | |
393 |
t->mtime = |
|
381 | t->mtime = 0; | |
394 | return (PyObject *)t; |
|
382 | return (PyObject *)t; | |
395 | }; |
|
383 | }; | |
396 |
|
384 | |||
397 | /* constructor to help legacy API to build a new "from_p2" item |
|
385 | /* constructor to help legacy API to build a new "from_p2" item | |
398 |
|
386 | |||
399 | Should eventually be removed */ |
|
387 | Should eventually be removed */ | |
400 | static PyObject *dirstate_item_new_from_p2(PyTypeObject *subtype) |
|
388 | static PyObject *dirstate_item_new_from_p2(PyTypeObject *subtype) | |
401 | { |
|
389 | { | |
402 | /* We do all the initialization here and not a tp_init function because |
|
390 | /* We do all the initialization here and not a tp_init function because | |
403 | * dirstate_item is immutable. */ |
|
391 | * dirstate_item is immutable. */ | |
404 | dirstateItemObject *t; |
|
392 | dirstateItemObject *t; | |
405 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
393 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); | |
406 | if (!t) { |
|
394 | if (!t) { | |
407 | return NULL; |
|
395 | return NULL; | |
408 | } |
|
396 | } | |
409 |
t->flags = |
|
397 | t->flags = dirstate_flag_wc_tracked | dirstate_flag_p2_info; | |
410 | dirstate_flag_clean_p2); |
|
|||
411 | t->mode = 0; |
|
398 | t->mode = 0; | |
412 | t->size = dirstate_v1_from_p2; |
|
399 | t->size = 0; | |
413 |
t->mtime = |
|
400 | t->mtime = 0; | |
414 | return (PyObject *)t; |
|
401 | return (PyObject *)t; | |
415 | }; |
|
402 | }; | |
416 |
|
403 | |||
417 | /* constructor to help legacy API to build a new "possibly" item |
|
404 | /* constructor to help legacy API to build a new "possibly" item | |
418 |
|
405 | |||
419 | Should eventually be removed */ |
|
406 | Should eventually be removed */ | |
420 | static PyObject *dirstate_item_new_possibly_dirty(PyTypeObject *subtype) |
|
407 | static PyObject *dirstate_item_new_possibly_dirty(PyTypeObject *subtype) | |
421 | { |
|
408 | { | |
422 | /* We do all the initialization here and not a tp_init function because |
|
409 | /* We do all the initialization here and not a tp_init function because | |
423 | * dirstate_item is immutable. */ |
|
410 | * dirstate_item is immutable. */ | |
424 | dirstateItemObject *t; |
|
411 | dirstateItemObject *t; | |
425 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
412 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); | |
426 | if (!t) { |
|
413 | if (!t) { | |
427 | return NULL; |
|
414 | return NULL; | |
428 | } |
|
415 | } | |
429 |
t->flags = |
|
416 | t->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked; | |
430 | dirstate_flag_possibly_dirty); |
|
|||
431 | t->mode = 0; |
|
417 | t->mode = 0; | |
432 | t->size = dirstate_v1_nonnormal; |
|
418 | t->size = 0; | |
433 |
t->mtime = |
|
419 | t->mtime = 0; | |
434 | return (PyObject *)t; |
|
420 | return (PyObject *)t; | |
435 | }; |
|
421 | }; | |
436 |
|
422 | |||
437 | /* constructor to help legacy API to build a new "normal" item |
|
423 | /* constructor to help legacy API to build a new "normal" item | |
438 |
|
424 | |||
439 | Should eventually be removed */ |
|
425 | Should eventually be removed */ | |
440 | static PyObject *dirstate_item_new_normal(PyTypeObject *subtype, PyObject *args) |
|
426 | static PyObject *dirstate_item_new_normal(PyTypeObject *subtype, PyObject *args) | |
441 | { |
|
427 | { | |
442 | /* We do all the initialization here and not a tp_init function because |
|
428 | /* We do all the initialization here and not a tp_init function because | |
443 | * dirstate_item is immutable. */ |
|
429 | * dirstate_item is immutable. */ | |
444 | dirstateItemObject *t; |
|
430 | dirstateItemObject *t; | |
445 | int size, mode, mtime; |
|
431 | int size, mode, mtime; | |
446 | if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) { |
|
432 | if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) { | |
447 | return NULL; |
|
433 | return NULL; | |
448 | } |
|
434 | } | |
449 |
|
435 | |||
450 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
436 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); | |
451 | if (!t) { |
|
437 | if (!t) { | |
452 | return NULL; |
|
438 | return NULL; | |
453 | } |
|
439 | } | |
454 | t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked); |
|
440 | t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked); | |
455 | t->mode = mode; |
|
441 | t->mode = mode; | |
456 | t->size = size; |
|
442 | t->size = size; | |
457 | t->mtime = mtime; |
|
443 | t->mtime = mtime; | |
458 | return (PyObject *)t; |
|
444 | return (PyObject *)t; | |
459 | }; |
|
445 | }; | |
460 |
|
446 | |||
461 | /* This means the next status call will have to actually check its content |
|
447 | /* This means the next status call will have to actually check its content | |
462 | to make sure it is correct. */ |
|
448 | to make sure it is correct. */ | |
463 | static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self) |
|
449 | static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self) | |
464 | { |
|
450 | { | |
465 |
self->flags |
|
451 | self->flags &= ~dirstate_flag_has_meaningful_mtime; | |
466 | Py_RETURN_NONE; |
|
452 | Py_RETURN_NONE; | |
467 | } |
|
453 | } | |
468 |
|
454 | |||
469 | /* See docstring of the python implementation for details */ |
|
455 | /* See docstring of the python implementation for details */ | |
470 | static PyObject *dirstate_item_set_clean(dirstateItemObject *self, |
|
456 | static PyObject *dirstate_item_set_clean(dirstateItemObject *self, | |
471 | PyObject *args) |
|
457 | PyObject *args) | |
472 | { |
|
458 | { | |
473 | int size, mode, mtime; |
|
459 | int size, mode, mtime; | |
474 | if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) { |
|
460 | if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) { | |
475 | return NULL; |
|
461 | return NULL; | |
476 | } |
|
462 | } | |
477 |
self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
|
463 | self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | | |
|
464 | dirstate_flag_has_meaningful_data | | |||
|
465 | dirstate_flag_has_meaningful_mtime; | |||
478 | self->mode = mode; |
|
466 | self->mode = mode; | |
479 | self->size = size; |
|
467 | self->size = size; | |
480 | self->mtime = mtime; |
|
468 | self->mtime = mtime; | |
481 | Py_RETURN_NONE; |
|
469 | Py_RETURN_NONE; | |
482 | } |
|
470 | } | |
483 |
|
471 | |||
484 | static PyObject *dirstate_item_set_tracked(dirstateItemObject *self) |
|
472 | static PyObject *dirstate_item_set_tracked(dirstateItemObject *self) | |
485 | { |
|
473 | { | |
486 | self->flags |= dirstate_flag_wc_tracked; |
|
474 | self->flags |= dirstate_flag_wc_tracked; | |
487 |
self->flags |
|
475 | self->flags &= ~dirstate_flag_has_meaningful_mtime; | |
488 | /* size = None on the python size turn into size = NON_NORMAL when |
|
|||
489 | * accessed. So the next line is currently required, but a some future |
|
|||
490 | * clean up would be welcome. */ |
|
|||
491 | self->size = dirstate_v1_nonnormal; |
|
|||
492 | Py_RETURN_NONE; |
|
476 | Py_RETURN_NONE; | |
493 | } |
|
477 | } | |
494 |
|
478 | |||
495 | static PyObject *dirstate_item_set_untracked(dirstateItemObject *self) |
|
479 | static PyObject *dirstate_item_set_untracked(dirstateItemObject *self) | |
496 | { |
|
480 | { | |
497 | self->flags &= ~dirstate_flag_wc_tracked; |
|
481 | self->flags &= ~dirstate_flag_wc_tracked; | |
498 | self->mode = 0; |
|
482 | self->mode = 0; | |
499 | self->mtime = 0; |
|
483 | self->mtime = 0; | |
500 | self->size = 0; |
|
484 | self->size = 0; | |
501 | Py_RETURN_NONE; |
|
485 | Py_RETURN_NONE; | |
502 | } |
|
486 | } | |
503 |
|
487 | |||
504 | static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self) |
|
488 | static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self) | |
505 | { |
|
489 | { | |
506 | if (dirstate_item_c_merged(self) || dirstate_item_c_from_p2(self)) { |
|
490 | if (self->flags & dirstate_flag_p2_info) { | |
507 | if (dirstate_item_c_merged(self)) { |
|
491 | self->flags &= ~(dirstate_flag_p2_info | | |
508 | self->flags |= dirstate_flag_p1_tracked; |
|
492 | dirstate_flag_has_meaningful_data | | |
509 | } else { |
|
493 | dirstate_flag_has_meaningful_mtime); | |
510 | self->flags &= ~dirstate_flag_p1_tracked; |
|
|||
511 | } |
|
|||
512 | self->flags &= |
|
|||
513 | ~(dirstate_flag_merged | dirstate_flag_clean_p1 | |
|
|||
514 | dirstate_flag_clean_p2 | dirstate_flag_p2_tracked); |
|
|||
515 | self->flags |= dirstate_flag_possibly_dirty; |
|
|||
516 | self->mode = 0; |
|
494 | self->mode = 0; | |
517 | self->mtime = 0; |
|
495 | self->mtime = 0; | |
518 | /* size = None on the python size turn into size = NON_NORMAL |
|
496 | self->size = 0; | |
519 | * when accessed. So the next line is currently required, but a |
|
|||
520 | * some future clean up would be welcome. */ |
|
|||
521 | self->size = dirstate_v1_nonnormal; |
|
|||
522 | } |
|
497 | } | |
523 | Py_RETURN_NONE; |
|
498 | Py_RETURN_NONE; | |
524 | } |
|
499 | } | |
525 | static PyMethodDef dirstate_item_methods[] = { |
|
500 | static PyMethodDef dirstate_item_methods[] = { | |
526 | {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS, |
|
501 | {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS, | |
527 | "return a \"state\" suitable for v1 serialization"}, |
|
502 | "return a \"state\" suitable for v1 serialization"}, | |
528 | {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS, |
|
503 | {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS, | |
529 | "return a \"mode\" suitable for v1 serialization"}, |
|
504 | "return a \"mode\" suitable for v1 serialization"}, | |
530 | {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS, |
|
505 | {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS, | |
531 | "return a \"size\" suitable for v1 serialization"}, |
|
506 | "return a \"size\" suitable for v1 serialization"}, | |
532 | {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS, |
|
507 | {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS, | |
533 | "return a \"mtime\" suitable for v1 serialization"}, |
|
508 | "return a \"mtime\" suitable for v1 serialization"}, | |
534 | {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O, |
|
509 | {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O, | |
535 | "True if the stored mtime would be ambiguous with the current time"}, |
|
510 | "True if the stored mtime would be ambiguous with the current time"}, | |
536 | {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, |
|
511 | {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, | |
537 | METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"}, |
|
512 | METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"}, | |
538 | {"new_added", (PyCFunction)dirstate_item_new_added, |
|
513 | {"new_added", (PyCFunction)dirstate_item_new_added, | |
539 | METH_NOARGS | METH_CLASS, |
|
514 | METH_NOARGS | METH_CLASS, | |
540 | "constructor to help legacy API to build a new \"added\" item"}, |
|
515 | "constructor to help legacy API to build a new \"added\" item"}, | |
541 | {"new_merged", (PyCFunction)dirstate_item_new_merged, |
|
516 | {"new_merged", (PyCFunction)dirstate_item_new_merged, | |
542 | METH_NOARGS | METH_CLASS, |
|
517 | METH_NOARGS | METH_CLASS, | |
543 | "constructor to help legacy API to build a new \"merged\" item"}, |
|
518 | "constructor to help legacy API to build a new \"merged\" item"}, | |
544 | {"new_from_p2", (PyCFunction)dirstate_item_new_from_p2, |
|
519 | {"new_from_p2", (PyCFunction)dirstate_item_new_from_p2, | |
545 | METH_NOARGS | METH_CLASS, |
|
520 | METH_NOARGS | METH_CLASS, | |
546 | "constructor to help legacy API to build a new \"from_p2\" item"}, |
|
521 | "constructor to help legacy API to build a new \"from_p2\" item"}, | |
547 | {"new_possibly_dirty", (PyCFunction)dirstate_item_new_possibly_dirty, |
|
522 | {"new_possibly_dirty", (PyCFunction)dirstate_item_new_possibly_dirty, | |
548 | METH_NOARGS | METH_CLASS, |
|
523 | METH_NOARGS | METH_CLASS, | |
549 | "constructor to help legacy API to build a new \"possibly_dirty\" item"}, |
|
524 | "constructor to help legacy API to build a new \"possibly_dirty\" item"}, | |
550 | {"new_normal", (PyCFunction)dirstate_item_new_normal, |
|
525 | {"new_normal", (PyCFunction)dirstate_item_new_normal, | |
551 | METH_VARARGS | METH_CLASS, |
|
526 | METH_VARARGS | METH_CLASS, | |
552 | "constructor to help legacy API to build a new \"normal\" item"}, |
|
527 | "constructor to help legacy API to build a new \"normal\" item"}, | |
553 | {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty, |
|
528 | {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty, | |
554 | METH_NOARGS, "mark a file as \"possibly dirty\""}, |
|
529 | METH_NOARGS, "mark a file as \"possibly dirty\""}, | |
555 | {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS, |
|
530 | {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS, | |
556 | "mark a file as \"clean\""}, |
|
531 | "mark a file as \"clean\""}, | |
557 | {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS, |
|
532 | {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS, | |
558 | "mark a file as \"tracked\""}, |
|
533 | "mark a file as \"tracked\""}, | |
559 | {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS, |
|
534 | {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS, | |
560 | "mark a file as \"untracked\""}, |
|
535 | "mark a file as \"untracked\""}, | |
561 | {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS, |
|
536 | {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS, | |
562 | "remove all \"merge-only\" from a DirstateItem"}, |
|
537 | "remove all \"merge-only\" from a DirstateItem"}, | |
563 | {NULL} /* Sentinel */ |
|
538 | {NULL} /* Sentinel */ | |
564 | }; |
|
539 | }; | |
565 |
|
540 | |||
566 | static PyObject *dirstate_item_get_mode(dirstateItemObject *self) |
|
541 | static PyObject *dirstate_item_get_mode(dirstateItemObject *self) | |
567 | { |
|
542 | { | |
568 | return PyInt_FromLong(dirstate_item_c_v1_mode(self)); |
|
543 | return PyInt_FromLong(dirstate_item_c_v1_mode(self)); | |
569 | }; |
|
544 | }; | |
570 |
|
545 | |||
571 | static PyObject *dirstate_item_get_size(dirstateItemObject *self) |
|
546 | static PyObject *dirstate_item_get_size(dirstateItemObject *self) | |
572 | { |
|
547 | { | |
573 | return PyInt_FromLong(dirstate_item_c_v1_size(self)); |
|
548 | return PyInt_FromLong(dirstate_item_c_v1_size(self)); | |
574 | }; |
|
549 | }; | |
575 |
|
550 | |||
576 | static PyObject *dirstate_item_get_mtime(dirstateItemObject *self) |
|
551 | static PyObject *dirstate_item_get_mtime(dirstateItemObject *self) | |
577 | { |
|
552 | { | |
578 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); |
|
553 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); | |
579 | }; |
|
554 | }; | |
580 |
|
555 | |||
581 | static PyObject *dirstate_item_get_state(dirstateItemObject *self) |
|
556 | static PyObject *dirstate_item_get_state(dirstateItemObject *self) | |
582 | { |
|
557 | { | |
583 | char state = dirstate_item_c_v1_state(self); |
|
558 | char state = dirstate_item_c_v1_state(self); | |
584 | return PyBytes_FromStringAndSize(&state, 1); |
|
559 | return PyBytes_FromStringAndSize(&state, 1); | |
585 | }; |
|
560 | }; | |
586 |
|
561 | |||
587 | static PyObject *dirstate_item_get_tracked(dirstateItemObject *self) |
|
562 | static PyObject *dirstate_item_get_tracked(dirstateItemObject *self) | |
588 | { |
|
563 | { | |
589 | if (dirstate_item_c_tracked(self)) { |
|
564 | if (dirstate_item_c_tracked(self)) { | |
590 | Py_RETURN_TRUE; |
|
565 | Py_RETURN_TRUE; | |
591 | } else { |
|
566 | } else { | |
592 | Py_RETURN_FALSE; |
|
567 | Py_RETURN_FALSE; | |
593 | } |
|
568 | } | |
594 | }; |
|
569 | }; | |
595 |
|
570 | |||
596 | static PyObject *dirstate_item_get_added(dirstateItemObject *self) |
|
571 | static PyObject *dirstate_item_get_added(dirstateItemObject *self) | |
597 | { |
|
572 | { | |
598 | if (dirstate_item_c_added(self)) { |
|
573 | if (dirstate_item_c_added(self)) { | |
599 | Py_RETURN_TRUE; |
|
574 | Py_RETURN_TRUE; | |
600 | } else { |
|
575 | } else { | |
601 | Py_RETURN_FALSE; |
|
576 | Py_RETURN_FALSE; | |
602 | } |
|
577 | } | |
603 | }; |
|
578 | }; | |
604 |
|
579 | |||
605 | static PyObject *dirstate_item_get_merged(dirstateItemObject *self) |
|
580 | static PyObject *dirstate_item_get_merged(dirstateItemObject *self) | |
606 | { |
|
581 | { | |
607 | if (dirstate_item_c_merged(self)) { |
|
582 | if (dirstate_item_c_merged(self)) { | |
608 | Py_RETURN_TRUE; |
|
583 | Py_RETURN_TRUE; | |
609 | } else { |
|
584 | } else { | |
610 | Py_RETURN_FALSE; |
|
585 | Py_RETURN_FALSE; | |
611 | } |
|
586 | } | |
612 | }; |
|
587 | }; | |
613 |
|
588 | |||
614 | static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self) |
|
589 | static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self) | |
615 | { |
|
590 | { | |
616 | if (dirstate_item_c_from_p2(self)) { |
|
591 | if (dirstate_item_c_from_p2(self)) { | |
617 | Py_RETURN_TRUE; |
|
592 | Py_RETURN_TRUE; | |
618 | } else { |
|
593 | } else { | |
619 | Py_RETURN_FALSE; |
|
594 | Py_RETURN_FALSE; | |
620 | } |
|
595 | } | |
621 | }; |
|
596 | }; | |
622 |
|
597 | |||
623 | static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self) |
|
598 | static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self) | |
624 | { |
|
599 | { | |
625 | if (!(self->flags & dirstate_flag_wc_tracked)) { |
|
600 | if (!(self->flags & dirstate_flag_wc_tracked)) { | |
626 | Py_RETURN_FALSE; |
|
601 | Py_RETURN_FALSE; | |
627 | } else if (dirstate_item_c_added(self)) { |
|
602 | } else if (!(self->flags & dirstate_flag_p1_tracked)) { | |
628 | Py_RETURN_FALSE; |
|
603 | Py_RETURN_FALSE; | |
629 |
} else if (self->flags & dirstate_flag_ |
|
604 | } else if (self->flags & dirstate_flag_p2_info) { | |
630 | Py_RETURN_FALSE; |
|
|||
631 | } else if (self->flags & dirstate_flag_clean_p2) { |
|
|||
632 | Py_RETURN_FALSE; |
|
605 | Py_RETURN_FALSE; | |
633 | } else { |
|
606 | } else { | |
634 | Py_RETURN_TRUE; |
|
607 | Py_RETURN_TRUE; | |
635 | } |
|
608 | } | |
636 | }; |
|
609 | }; | |
637 |
|
610 | |||
638 | static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self) |
|
611 | static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self) | |
639 | { |
|
612 | { | |
640 | unsigned char mask = dirstate_flag_wc_tracked | |
|
613 | if (dirstate_item_c_any_tracked(self)) { | |
641 | dirstate_flag_p1_tracked | |
|
|||
642 | dirstate_flag_p2_tracked; |
|
|||
643 | if ((self->flags & mask) != 0) { |
|
|||
644 | Py_RETURN_TRUE; |
|
614 | Py_RETURN_TRUE; | |
645 | } else { |
|
615 | } else { | |
646 | Py_RETURN_FALSE; |
|
616 | Py_RETURN_FALSE; | |
647 | } |
|
617 | } | |
648 | }; |
|
618 | }; | |
649 |
|
619 | |||
650 | static PyObject *dirstate_item_get_removed(dirstateItemObject *self) |
|
620 | static PyObject *dirstate_item_get_removed(dirstateItemObject *self) | |
651 | { |
|
621 | { | |
652 | if (dirstate_item_c_removed(self)) { |
|
622 | if (dirstate_item_c_removed(self)) { | |
653 | Py_RETURN_TRUE; |
|
623 | Py_RETURN_TRUE; | |
654 | } else { |
|
624 | } else { | |
655 | Py_RETURN_FALSE; |
|
625 | Py_RETURN_FALSE; | |
656 | } |
|
626 | } | |
657 | }; |
|
627 | }; | |
658 |
|
628 | |||
659 | static PyGetSetDef dirstate_item_getset[] = { |
|
629 | static PyGetSetDef dirstate_item_getset[] = { | |
660 | {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL}, |
|
630 | {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL}, | |
661 | {"size", (getter)dirstate_item_get_size, NULL, "size", NULL}, |
|
631 | {"size", (getter)dirstate_item_get_size, NULL, "size", NULL}, | |
662 | {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL}, |
|
632 | {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL}, | |
663 | {"state", (getter)dirstate_item_get_state, NULL, "state", NULL}, |
|
633 | {"state", (getter)dirstate_item_get_state, NULL, "state", NULL}, | |
664 | {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL}, |
|
634 | {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL}, | |
665 | {"added", (getter)dirstate_item_get_added, NULL, "added", NULL}, |
|
635 | {"added", (getter)dirstate_item_get_added, NULL, "added", NULL}, | |
666 | {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL}, |
|
636 | {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL}, | |
667 | {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL}, |
|
637 | {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL}, | |
668 | {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean", |
|
638 | {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean", | |
669 | NULL}, |
|
639 | NULL}, | |
670 | {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked", |
|
640 | {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked", | |
671 | NULL}, |
|
641 | NULL}, | |
672 | {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL}, |
|
642 | {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL}, | |
673 | {NULL} /* Sentinel */ |
|
643 | {NULL} /* Sentinel */ | |
674 | }; |
|
644 | }; | |
675 |
|
645 | |||
676 | PyTypeObject dirstateItemType = { |
|
646 | PyTypeObject dirstateItemType = { | |
677 | PyVarObject_HEAD_INIT(NULL, 0) /* header */ |
|
647 | PyVarObject_HEAD_INIT(NULL, 0) /* header */ | |
678 | "dirstate_tuple", /* tp_name */ |
|
648 | "dirstate_tuple", /* tp_name */ | |
679 | sizeof(dirstateItemObject), /* tp_basicsize */ |
|
649 | sizeof(dirstateItemObject), /* tp_basicsize */ | |
680 | 0, /* tp_itemsize */ |
|
650 | 0, /* tp_itemsize */ | |
681 | (destructor)dirstate_item_dealloc, /* tp_dealloc */ |
|
651 | (destructor)dirstate_item_dealloc, /* tp_dealloc */ | |
682 | 0, /* tp_print */ |
|
652 | 0, /* tp_print */ | |
683 | 0, /* tp_getattr */ |
|
653 | 0, /* tp_getattr */ | |
684 | 0, /* tp_setattr */ |
|
654 | 0, /* tp_setattr */ | |
685 | 0, /* tp_compare */ |
|
655 | 0, /* tp_compare */ | |
686 | 0, /* tp_repr */ |
|
656 | 0, /* tp_repr */ | |
687 | 0, /* tp_as_number */ |
|
657 | 0, /* tp_as_number */ | |
688 | 0, /* tp_as_sequence */ |
|
658 | 0, /* tp_as_sequence */ | |
689 | 0, /* tp_as_mapping */ |
|
659 | 0, /* tp_as_mapping */ | |
690 | 0, /* tp_hash */ |
|
660 | 0, /* tp_hash */ | |
691 | 0, /* tp_call */ |
|
661 | 0, /* tp_call */ | |
692 | 0, /* tp_str */ |
|
662 | 0, /* tp_str */ | |
693 | 0, /* tp_getattro */ |
|
663 | 0, /* tp_getattro */ | |
694 | 0, /* tp_setattro */ |
|
664 | 0, /* tp_setattro */ | |
695 | 0, /* tp_as_buffer */ |
|
665 | 0, /* tp_as_buffer */ | |
696 | Py_TPFLAGS_DEFAULT, /* tp_flags */ |
|
666 | Py_TPFLAGS_DEFAULT, /* tp_flags */ | |
697 | "dirstate tuple", /* tp_doc */ |
|
667 | "dirstate tuple", /* tp_doc */ | |
698 | 0, /* tp_traverse */ |
|
668 | 0, /* tp_traverse */ | |
699 | 0, /* tp_clear */ |
|
669 | 0, /* tp_clear */ | |
700 | 0, /* tp_richcompare */ |
|
670 | 0, /* tp_richcompare */ | |
701 | 0, /* tp_weaklistoffset */ |
|
671 | 0, /* tp_weaklistoffset */ | |
702 | 0, /* tp_iter */ |
|
672 | 0, /* tp_iter */ | |
703 | 0, /* tp_iternext */ |
|
673 | 0, /* tp_iternext */ | |
704 | dirstate_item_methods, /* tp_methods */ |
|
674 | dirstate_item_methods, /* tp_methods */ | |
705 | 0, /* tp_members */ |
|
675 | 0, /* tp_members */ | |
706 | dirstate_item_getset, /* tp_getset */ |
|
676 | dirstate_item_getset, /* tp_getset */ | |
707 | 0, /* tp_base */ |
|
677 | 0, /* tp_base */ | |
708 | 0, /* tp_dict */ |
|
678 | 0, /* tp_dict */ | |
709 | 0, /* tp_descr_get */ |
|
679 | 0, /* tp_descr_get */ | |
710 | 0, /* tp_descr_set */ |
|
680 | 0, /* tp_descr_set */ | |
711 | 0, /* tp_dictoffset */ |
|
681 | 0, /* tp_dictoffset */ | |
712 | 0, /* tp_init */ |
|
682 | 0, /* tp_init */ | |
713 | 0, /* tp_alloc */ |
|
683 | 0, /* tp_alloc */ | |
714 | dirstate_item_new, /* tp_new */ |
|
684 | dirstate_item_new, /* tp_new */ | |
715 | }; |
|
685 | }; | |
716 |
|
686 | |||
717 | static PyObject *parse_dirstate(PyObject *self, PyObject *args) |
|
687 | static PyObject *parse_dirstate(PyObject *self, PyObject *args) | |
718 | { |
|
688 | { | |
719 | PyObject *dmap, *cmap, *parents = NULL, *ret = NULL; |
|
689 | PyObject *dmap, *cmap, *parents = NULL, *ret = NULL; | |
720 | PyObject *fname = NULL, *cname = NULL, *entry = NULL; |
|
690 | PyObject *fname = NULL, *cname = NULL, *entry = NULL; | |
721 | char state, *cur, *str, *cpos; |
|
691 | char state, *cur, *str, *cpos; | |
722 | int mode, size, mtime; |
|
692 | int mode, size, mtime; | |
723 | unsigned int flen, pos = 40; |
|
693 | unsigned int flen, pos = 40; | |
724 | Py_ssize_t len = 40; |
|
694 | Py_ssize_t len = 40; | |
725 | Py_ssize_t readlen; |
|
695 | Py_ssize_t readlen; | |
726 |
|
696 | |||
727 | if (!PyArg_ParseTuple( |
|
697 | if (!PyArg_ParseTuple( | |
728 | args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"), |
|
698 | args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"), | |
729 | &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) { |
|
699 | &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) { | |
730 | goto quit; |
|
700 | goto quit; | |
731 | } |
|
701 | } | |
732 |
|
702 | |||
733 | len = readlen; |
|
703 | len = readlen; | |
734 |
|
704 | |||
735 | /* read parents */ |
|
705 | /* read parents */ | |
736 | if (len < 40) { |
|
706 | if (len < 40) { | |
737 | PyErr_SetString(PyExc_ValueError, |
|
707 | PyErr_SetString(PyExc_ValueError, | |
738 | "too little data for parents"); |
|
708 | "too little data for parents"); | |
739 | goto quit; |
|
709 | goto quit; | |
740 | } |
|
710 | } | |
741 |
|
711 | |||
742 | parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20, |
|
712 | parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20, | |
743 | str + 20, (Py_ssize_t)20); |
|
713 | str + 20, (Py_ssize_t)20); | |
744 | if (!parents) { |
|
714 | if (!parents) { | |
745 | goto quit; |
|
715 | goto quit; | |
746 | } |
|
716 | } | |
747 |
|
717 | |||
748 | /* read filenames */ |
|
718 | /* read filenames */ | |
749 | while (pos >= 40 && pos < len) { |
|
719 | while (pos >= 40 && pos < len) { | |
750 | if (pos + 17 > len) { |
|
720 | if (pos + 17 > len) { | |
751 | PyErr_SetString(PyExc_ValueError, |
|
721 | PyErr_SetString(PyExc_ValueError, | |
752 | "overflow in dirstate"); |
|
722 | "overflow in dirstate"); | |
753 | goto quit; |
|
723 | goto quit; | |
754 | } |
|
724 | } | |
755 | cur = str + pos; |
|
725 | cur = str + pos; | |
756 | /* unpack header */ |
|
726 | /* unpack header */ | |
757 | state = *cur; |
|
727 | state = *cur; | |
758 | mode = getbe32(cur + 1); |
|
728 | mode = getbe32(cur + 1); | |
759 | size = getbe32(cur + 5); |
|
729 | size = getbe32(cur + 5); | |
760 | mtime = getbe32(cur + 9); |
|
730 | mtime = getbe32(cur + 9); | |
761 | flen = getbe32(cur + 13); |
|
731 | flen = getbe32(cur + 13); | |
762 | pos += 17; |
|
732 | pos += 17; | |
763 | cur += 17; |
|
733 | cur += 17; | |
764 | if (flen > len - pos) { |
|
734 | if (flen > len - pos) { | |
765 | PyErr_SetString(PyExc_ValueError, |
|
735 | PyErr_SetString(PyExc_ValueError, | |
766 | "overflow in dirstate"); |
|
736 | "overflow in dirstate"); | |
767 | goto quit; |
|
737 | goto quit; | |
768 | } |
|
738 | } | |
769 |
|
739 | |||
770 | entry = (PyObject *)dirstate_item_from_v1_data(state, mode, |
|
740 | entry = (PyObject *)dirstate_item_from_v1_data(state, mode, | |
771 | size, mtime); |
|
741 | size, mtime); | |
772 | if (!entry) |
|
742 | if (!entry) | |
773 | goto quit; |
|
743 | goto quit; | |
774 | cpos = memchr(cur, 0, flen); |
|
744 | cpos = memchr(cur, 0, flen); | |
775 | if (cpos) { |
|
745 | if (cpos) { | |
776 | fname = PyBytes_FromStringAndSize(cur, cpos - cur); |
|
746 | fname = PyBytes_FromStringAndSize(cur, cpos - cur); | |
777 | cname = PyBytes_FromStringAndSize( |
|
747 | cname = PyBytes_FromStringAndSize( | |
778 | cpos + 1, flen - (cpos - cur) - 1); |
|
748 | cpos + 1, flen - (cpos - cur) - 1); | |
779 | if (!fname || !cname || |
|
749 | if (!fname || !cname || | |
780 | PyDict_SetItem(cmap, fname, cname) == -1 || |
|
750 | PyDict_SetItem(cmap, fname, cname) == -1 || | |
781 | PyDict_SetItem(dmap, fname, entry) == -1) { |
|
751 | PyDict_SetItem(dmap, fname, entry) == -1) { | |
782 | goto quit; |
|
752 | goto quit; | |
783 | } |
|
753 | } | |
784 | Py_DECREF(cname); |
|
754 | Py_DECREF(cname); | |
785 | } else { |
|
755 | } else { | |
786 | fname = PyBytes_FromStringAndSize(cur, flen); |
|
756 | fname = PyBytes_FromStringAndSize(cur, flen); | |
787 | if (!fname || |
|
757 | if (!fname || | |
788 | PyDict_SetItem(dmap, fname, entry) == -1) { |
|
758 | PyDict_SetItem(dmap, fname, entry) == -1) { | |
789 | goto quit; |
|
759 | goto quit; | |
790 | } |
|
760 | } | |
791 | } |
|
761 | } | |
792 | Py_DECREF(fname); |
|
762 | Py_DECREF(fname); | |
793 | Py_DECREF(entry); |
|
763 | Py_DECREF(entry); | |
794 | fname = cname = entry = NULL; |
|
764 | fname = cname = entry = NULL; | |
795 | pos += flen; |
|
765 | pos += flen; | |
796 | } |
|
766 | } | |
797 |
|
767 | |||
798 | ret = parents; |
|
768 | ret = parents; | |
799 | Py_INCREF(ret); |
|
769 | Py_INCREF(ret); | |
800 | quit: |
|
770 | quit: | |
801 | Py_XDECREF(fname); |
|
771 | Py_XDECREF(fname); | |
802 | Py_XDECREF(cname); |
|
772 | Py_XDECREF(cname); | |
803 | Py_XDECREF(entry); |
|
773 | Py_XDECREF(entry); | |
804 | Py_XDECREF(parents); |
|
774 | Py_XDECREF(parents); | |
805 | return ret; |
|
775 | return ret; | |
806 | } |
|
776 | } | |
807 |
|
777 | |||
808 | /* |
|
778 | /* | |
809 | * Efficiently pack a dirstate object into its on-disk format. |
|
779 | * Efficiently pack a dirstate object into its on-disk format. | |
810 | */ |
|
780 | */ | |
811 | static PyObject *pack_dirstate(PyObject *self, PyObject *args) |
|
781 | static PyObject *pack_dirstate(PyObject *self, PyObject *args) | |
812 | { |
|
782 | { | |
813 | PyObject *packobj = NULL; |
|
783 | PyObject *packobj = NULL; | |
814 | PyObject *map, *copymap, *pl, *mtime_unset = NULL; |
|
784 | PyObject *map, *copymap, *pl, *mtime_unset = NULL; | |
815 | Py_ssize_t nbytes, pos, l; |
|
785 | Py_ssize_t nbytes, pos, l; | |
816 | PyObject *k, *v = NULL, *pn; |
|
786 | PyObject *k, *v = NULL, *pn; | |
817 | char *p, *s; |
|
787 | char *p, *s; | |
818 | int now; |
|
788 | int now; | |
819 |
|
789 | |||
820 | if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map, |
|
790 | if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map, | |
821 | &PyDict_Type, ©map, &PyTuple_Type, &pl, |
|
791 | &PyDict_Type, ©map, &PyTuple_Type, &pl, | |
822 | &now)) { |
|
792 | &now)) { | |
823 | return NULL; |
|
793 | return NULL; | |
824 | } |
|
794 | } | |
825 |
|
795 | |||
826 | if (PyTuple_Size(pl) != 2) { |
|
796 | if (PyTuple_Size(pl) != 2) { | |
827 | PyErr_SetString(PyExc_TypeError, "expected 2-element tuple"); |
|
797 | PyErr_SetString(PyExc_TypeError, "expected 2-element tuple"); | |
828 | return NULL; |
|
798 | return NULL; | |
829 | } |
|
799 | } | |
830 |
|
800 | |||
831 | /* Figure out how much we need to allocate. */ |
|
801 | /* Figure out how much we need to allocate. */ | |
832 | for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) { |
|
802 | for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) { | |
833 | PyObject *c; |
|
803 | PyObject *c; | |
834 | if (!PyBytes_Check(k)) { |
|
804 | if (!PyBytes_Check(k)) { | |
835 | PyErr_SetString(PyExc_TypeError, "expected string key"); |
|
805 | PyErr_SetString(PyExc_TypeError, "expected string key"); | |
836 | goto bail; |
|
806 | goto bail; | |
837 | } |
|
807 | } | |
838 | nbytes += PyBytes_GET_SIZE(k) + 17; |
|
808 | nbytes += PyBytes_GET_SIZE(k) + 17; | |
839 | c = PyDict_GetItem(copymap, k); |
|
809 | c = PyDict_GetItem(copymap, k); | |
840 | if (c) { |
|
810 | if (c) { | |
841 | if (!PyBytes_Check(c)) { |
|
811 | if (!PyBytes_Check(c)) { | |
842 | PyErr_SetString(PyExc_TypeError, |
|
812 | PyErr_SetString(PyExc_TypeError, | |
843 | "expected string key"); |
|
813 | "expected string key"); | |
844 | goto bail; |
|
814 | goto bail; | |
845 | } |
|
815 | } | |
846 | nbytes += PyBytes_GET_SIZE(c) + 1; |
|
816 | nbytes += PyBytes_GET_SIZE(c) + 1; | |
847 | } |
|
817 | } | |
848 | } |
|
818 | } | |
849 |
|
819 | |||
850 | packobj = PyBytes_FromStringAndSize(NULL, nbytes); |
|
820 | packobj = PyBytes_FromStringAndSize(NULL, nbytes); | |
851 | if (packobj == NULL) { |
|
821 | if (packobj == NULL) { | |
852 | goto bail; |
|
822 | goto bail; | |
853 | } |
|
823 | } | |
854 |
|
824 | |||
855 | p = PyBytes_AS_STRING(packobj); |
|
825 | p = PyBytes_AS_STRING(packobj); | |
856 |
|
826 | |||
857 | pn = PyTuple_GET_ITEM(pl, 0); |
|
827 | pn = PyTuple_GET_ITEM(pl, 0); | |
858 | if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) { |
|
828 | if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) { | |
859 | PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash"); |
|
829 | PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash"); | |
860 | goto bail; |
|
830 | goto bail; | |
861 | } |
|
831 | } | |
862 | memcpy(p, s, l); |
|
832 | memcpy(p, s, l); | |
863 | p += 20; |
|
833 | p += 20; | |
864 | pn = PyTuple_GET_ITEM(pl, 1); |
|
834 | pn = PyTuple_GET_ITEM(pl, 1); | |
865 | if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) { |
|
835 | if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) { | |
866 | PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash"); |
|
836 | PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash"); | |
867 | goto bail; |
|
837 | goto bail; | |
868 | } |
|
838 | } | |
869 | memcpy(p, s, l); |
|
839 | memcpy(p, s, l); | |
870 | p += 20; |
|
840 | p += 20; | |
871 |
|
841 | |||
872 | for (pos = 0; PyDict_Next(map, &pos, &k, &v);) { |
|
842 | for (pos = 0; PyDict_Next(map, &pos, &k, &v);) { | |
873 | dirstateItemObject *tuple; |
|
843 | dirstateItemObject *tuple; | |
874 | char state; |
|
844 | char state; | |
875 | int mode, size, mtime; |
|
845 | int mode, size, mtime; | |
876 | Py_ssize_t len, l; |
|
846 | Py_ssize_t len, l; | |
877 | PyObject *o; |
|
847 | PyObject *o; | |
878 | char *t; |
|
848 | char *t; | |
879 |
|
849 | |||
880 | if (!dirstate_tuple_check(v)) { |
|
850 | if (!dirstate_tuple_check(v)) { | |
881 | PyErr_SetString(PyExc_TypeError, |
|
851 | PyErr_SetString(PyExc_TypeError, | |
882 | "expected a dirstate tuple"); |
|
852 | "expected a dirstate tuple"); | |
883 | goto bail; |
|
853 | goto bail; | |
884 | } |
|
854 | } | |
885 | tuple = (dirstateItemObject *)v; |
|
855 | tuple = (dirstateItemObject *)v; | |
886 |
|
856 | |||
887 | state = dirstate_item_c_v1_state(tuple); |
|
857 | state = dirstate_item_c_v1_state(tuple); | |
888 | mode = dirstate_item_c_v1_mode(tuple); |
|
858 | mode = dirstate_item_c_v1_mode(tuple); | |
889 | size = dirstate_item_c_v1_size(tuple); |
|
859 | size = dirstate_item_c_v1_size(tuple); | |
890 | mtime = dirstate_item_c_v1_mtime(tuple); |
|
860 | mtime = dirstate_item_c_v1_mtime(tuple); | |
891 | if (state == 'n' && mtime == now) { |
|
861 | if (state == 'n' && mtime == now) { | |
892 | /* See pure/parsers.py:pack_dirstate for why we do |
|
862 | /* See pure/parsers.py:pack_dirstate for why we do | |
893 | * this. */ |
|
863 | * this. */ | |
894 | mtime = -1; |
|
864 | mtime = -1; | |
895 | mtime_unset = (PyObject *)dirstate_item_from_v1_data( |
|
865 | mtime_unset = (PyObject *)dirstate_item_from_v1_data( | |
896 | state, mode, size, mtime); |
|
866 | state, mode, size, mtime); | |
897 | if (!mtime_unset) { |
|
867 | if (!mtime_unset) { | |
898 | goto bail; |
|
868 | goto bail; | |
899 | } |
|
869 | } | |
900 | if (PyDict_SetItem(map, k, mtime_unset) == -1) { |
|
870 | if (PyDict_SetItem(map, k, mtime_unset) == -1) { | |
901 | goto bail; |
|
871 | goto bail; | |
902 | } |
|
872 | } | |
903 | Py_DECREF(mtime_unset); |
|
873 | Py_DECREF(mtime_unset); | |
904 | mtime_unset = NULL; |
|
874 | mtime_unset = NULL; | |
905 | } |
|
875 | } | |
906 | *p++ = state; |
|
876 | *p++ = state; | |
907 | putbe32((uint32_t)mode, p); |
|
877 | putbe32((uint32_t)mode, p); | |
908 | putbe32((uint32_t)size, p + 4); |
|
878 | putbe32((uint32_t)size, p + 4); | |
909 | putbe32((uint32_t)mtime, p + 8); |
|
879 | putbe32((uint32_t)mtime, p + 8); | |
910 | t = p + 12; |
|
880 | t = p + 12; | |
911 | p += 16; |
|
881 | p += 16; | |
912 | len = PyBytes_GET_SIZE(k); |
|
882 | len = PyBytes_GET_SIZE(k); | |
913 | memcpy(p, PyBytes_AS_STRING(k), len); |
|
883 | memcpy(p, PyBytes_AS_STRING(k), len); | |
914 | p += len; |
|
884 | p += len; | |
915 | o = PyDict_GetItem(copymap, k); |
|
885 | o = PyDict_GetItem(copymap, k); | |
916 | if (o) { |
|
886 | if (o) { | |
917 | *p++ = '\0'; |
|
887 | *p++ = '\0'; | |
918 | l = PyBytes_GET_SIZE(o); |
|
888 | l = PyBytes_GET_SIZE(o); | |
919 | memcpy(p, PyBytes_AS_STRING(o), l); |
|
889 | memcpy(p, PyBytes_AS_STRING(o), l); | |
920 | p += l; |
|
890 | p += l; | |
921 | len += l + 1; |
|
891 | len += l + 1; | |
922 | } |
|
892 | } | |
923 | putbe32((uint32_t)len, t); |
|
893 | putbe32((uint32_t)len, t); | |
924 | } |
|
894 | } | |
925 |
|
895 | |||
926 | pos = p - PyBytes_AS_STRING(packobj); |
|
896 | pos = p - PyBytes_AS_STRING(packobj); | |
927 | if (pos != nbytes) { |
|
897 | if (pos != nbytes) { | |
928 | PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld", |
|
898 | PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld", | |
929 | (long)pos, (long)nbytes); |
|
899 | (long)pos, (long)nbytes); | |
930 | goto bail; |
|
900 | goto bail; | |
931 | } |
|
901 | } | |
932 |
|
902 | |||
933 | return packobj; |
|
903 | return packobj; | |
934 | bail: |
|
904 | bail: | |
935 | Py_XDECREF(mtime_unset); |
|
905 | Py_XDECREF(mtime_unset); | |
936 | Py_XDECREF(packobj); |
|
906 | Py_XDECREF(packobj); | |
937 | Py_XDECREF(v); |
|
907 | Py_XDECREF(v); | |
938 | return NULL; |
|
908 | return NULL; | |
939 | } |
|
909 | } | |
940 |
|
910 | |||
941 | #define BUMPED_FIX 1 |
|
911 | #define BUMPED_FIX 1 | |
942 | #define USING_SHA_256 2 |
|
912 | #define USING_SHA_256 2 | |
943 | #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1) |
|
913 | #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1) | |
944 |
|
914 | |||
/* Read `num` consecutive hashes of `hashwidth` bytes each from `source`
 * and return them as a new tuple of bytes objects, or NULL (with an
 * exception set) on allocation failure. The caller must guarantee that
 * at least num * hashwidth bytes are readable at `source`. */
static PyObject *readshas(const char *source, unsigned char num,
                          Py_ssize_t hashwidth)
{
	int i;
	/* named "list" but actually a tuple; kept for historical reasons */
	PyObject *list = PyTuple_New(num);
	if (list == NULL) {
		return NULL;
	}
	for (i = 0; i < num; i++) {
		PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
		if (hash == NULL) {
			Py_DECREF(list);
			return NULL;
		}
		/* PyTuple_SET_ITEM steals the reference to `hash` */
		PyTuple_SET_ITEM(list, i, hash);
		source += hashwidth;
	}
	return list;
}
964 |
|
934 | |||
/* Parse a single version-1 obsolescence marker from the byte range
 * [databegin, dataend).
 *
 * On success, stores the total on-disk size of the marker in *msize
 * (so the caller can advance to the next marker) and returns a tuple
 * (prec, succs, flags, metadata, (mtime, tz*60), parents).
 * On failure, returns NULL with ValueError ("overflow in obsstore")
 * for truncated input, or whatever exception an allocation raised. */
static PyObject *fm1readmarker(const char *databegin, const char *dataend,
                               uint32_t *msize)
{
	const char *data = databegin;
	const char *meta;

	double mtime;
	int16_t tz;
	uint16_t flags;
	unsigned char nsuccs, nparents, nmetadata;
	Py_ssize_t hashwidth = 20; /* SHA-1 by default */

	PyObject *prec = NULL, *parents = NULL, *succs = NULL;
	PyObject *metadata = NULL, *ret = NULL;
	int i;

	/* fixed-size header must fit before we read any field */
	if (data + FM1_HEADER_SIZE > dataend) {
		goto overflow;
	}

	*msize = getbe32(data);
	data += 4;
	mtime = getbefloat64(data);
	data += 8;
	tz = getbeint16(data);
	data += 2;
	flags = getbeuint16(data);
	data += 2;

	if (flags & USING_SHA_256) {
		hashwidth = 32;
	}

	nsuccs = (unsigned char)(*data++);
	nparents = (unsigned char)(*data++);
	nmetadata = (unsigned char)(*data++);

	/* the declared marker size must itself fit in the input */
	if (databegin + *msize > dataend) {
		goto overflow;
	}
	dataend = databegin + *msize; /* narrow down to marker size */

	if (data + hashwidth > dataend) {
		goto overflow;
	}
	prec = PyBytes_FromStringAndSize(data, hashwidth);
	data += hashwidth;
	if (prec == NULL) {
		goto bail;
	}

	if (data + nsuccs * hashwidth > dataend) {
		goto overflow;
	}
	succs = readshas(data, nsuccs, hashwidth);
	if (succs == NULL) {
		goto bail;
	}
	data += nsuccs * hashwidth;

	/* any nparents value other than 1 or 2 means "parents unknown" */
	if (nparents == 1 || nparents == 2) {
		if (data + nparents * hashwidth > dataend) {
			goto overflow;
		}
		parents = readshas(data, nparents, hashwidth);
		if (parents == NULL) {
			goto bail;
		}
		data += nparents * hashwidth;
	} else {
		parents = Py_None;
		Py_INCREF(parents);
	}

	/* metadata layout: nmetadata (key-size, value-size) byte pairs,
	 * followed by the concatenated key/value payloads */
	if (data + 2 * nmetadata > dataend) {
		goto overflow;
	}
	meta = data + (2 * nmetadata);
	metadata = PyTuple_New(nmetadata);
	if (metadata == NULL) {
		goto bail;
	}
	for (i = 0; i < nmetadata; i++) {
		PyObject *tmp, *left = NULL, *right = NULL;
		Py_ssize_t leftsize = (unsigned char)(*data++);
		Py_ssize_t rightsize = (unsigned char)(*data++);
		if (meta + leftsize + rightsize > dataend) {
			goto overflow;
		}
		left = PyBytes_FromStringAndSize(meta, leftsize);
		meta += leftsize;
		right = PyBytes_FromStringAndSize(meta, rightsize);
		meta += rightsize;
		tmp = PyTuple_New(2);
		if (!left || !right || !tmp) {
			Py_XDECREF(left);
			Py_XDECREF(right);
			Py_XDECREF(tmp);
			goto bail;
		}
		/* SET_ITEM steals the references to left/right/tmp */
		PyTuple_SET_ITEM(tmp, 0, left);
		PyTuple_SET_ITEM(tmp, 1, right);
		PyTuple_SET_ITEM(metadata, i, tmp);
	}
	/* "(di)" packs (mtime, tz*60) as an inner (float, int) tuple;
	 * tz is stored in minutes on disk, exposed as seconds here */
	ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
	                    (int)tz * 60, parents);
	goto bail; /* return successfully */

overflow:
	PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
bail:
	/* shared cleanup path: ret is NULL on error, the result on success;
	 * Py_BuildValue took its own references, so we drop ours here */
	Py_XDECREF(prec);
	Py_XDECREF(succs);
	Py_XDECREF(metadata);
	Py_XDECREF(parents);
	return ret;
}
1082 |
|
1052 | |||
/* Python entry point: parse all version-1 obsolescence markers found in
 * data[offset:stop] and return them as a list of marker tuples (see
 * fm1readmarker for the tuple layout).
 *
 * Arguments (from Python): bytes data, offset, stop.
 * Raises ValueError for a negative offset, a stop beyond the data
 * length, or a truncated marker. */
static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
{
	const char *data, *dataend;
	Py_ssize_t datalen, offset, stop;
	PyObject *markers = NULL;

	/* "s#" (py2) / "y#" (py3): a read-only byte buffer + its length */
	if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
	                      &offset, &stop)) {
		return NULL;
	}
	if (offset < 0) {
		PyErr_SetString(PyExc_ValueError,
		                "invalid negative offset in fm1readmarkers");
		return NULL;
	}
	if (stop > datalen) {
		PyErr_SetString(
		    PyExc_ValueError,
		    "stop longer than data length in fm1readmarkers");
		return NULL;
	}
	dataend = data + datalen;
	data += offset;
	markers = PyList_New(0);
	if (!markers) {
		return NULL;
	}
	while (offset < stop) {
		uint32_t msize;
		int error;
		PyObject *record = fm1readmarker(data, dataend, &msize);
		if (!record) {
			goto bail;
		}
		error = PyList_Append(markers, record);
		/* the list holds its own reference now; drop ours
		 * regardless of whether the append succeeded */
		Py_DECREF(record);
		if (error) {
			goto bail;
		}
		/* fm1readmarker reported the marker's on-disk size */
		data += msize;
		offset += msize;
	}
	return markers;
bail:
	Py_DECREF(markers);
	return NULL;
}
1130 |
|
1100 | |||
1131 | static char parsers_doc[] = "Efficient content parsing."; |
|
1101 | static char parsers_doc[] = "Efficient content parsing."; | |
1132 |
|
1102 | |||
1133 | PyObject *encodedir(PyObject *self, PyObject *args); |
|
1103 | PyObject *encodedir(PyObject *self, PyObject *args); | |
1134 | PyObject *pathencode(PyObject *self, PyObject *args); |
|
1104 | PyObject *pathencode(PyObject *self, PyObject *args); | |
1135 | PyObject *lowerencode(PyObject *self, PyObject *args); |
|
1105 | PyObject *lowerencode(PyObject *self, PyObject *args); | |
1136 | PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs); |
|
1106 | PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs); | |
1137 |
|
1107 | |||
/* Method table for the `parsers` extension module. Several entries
 * (isasciistr, asciilower, asciiupper, dict_new_presized,
 * make_file_foldmap, jsonescapeu8fast) are implemented in charencode.c;
 * encodedir/pathencode/lowerencode are forward-declared above. */
static PyMethodDef methods[] = {
    {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
    {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
    {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
     "parse a revlog index\n"},
    {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
    {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
    {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
    {"dict_new_presized", dict_new_presized, METH_VARARGS,
     "construct a dict with an expected size\n"},
    {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
     "make file foldmap\n"},
    {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
     "escape a UTF-8 byte string to JSON (fast path)\n"},
    {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
    {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
    {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
    {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
     "parse v1 obsolete markers\n"},
    {NULL, NULL}}; /* sentinel */
1158 |
|
1128 | |||
1159 | void dirs_module_init(PyObject *mod); |
|
1129 | void dirs_module_init(PyObject *mod); | |
1160 | void manifest_module_init(PyObject *mod); |
|
1130 | void manifest_module_init(PyObject *mod); | |
1161 | void revlog_module_init(PyObject *mod); |
|
1131 | void revlog_module_init(PyObject *mod); | |
1162 |
|
1132 | |||
1163 | static const int version = 20; |
|
1133 | static const int version = 20; | |
1164 |
|
1134 | |||
/* Populate the already-created `parsers` module object with constants,
 * submodule initializers (dirs, manifest, revlog), and the DirstateItem
 * type. NOTE(review): errors here (e.g. PyType_Ready failing) cause a
 * silent early return; the module is then left partially initialized. */
static void module_init(PyObject *mod)
{
	/* ABI version of this extension; checked by the Python side */
	PyModule_AddIntConstant(mod, "version", version);

	/* This module constant has two purposes. First, it lets us unit test
	 * the ImportError raised without hard-coding any error text. This
	 * means we can change the text in the future without breaking tests,
	 * even across changesets without a recompile. Second, its presence
	 * can be used to determine whether the version-checking logic is
	 * present, which also helps in testing across changesets without a
	 * recompile. Note that this means the pure-Python version of parsers
	 * should not have this module constant. */
	PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);

	dirs_module_init(mod);
	manifest_module_init(mod);
	revlog_module_init(mod);

	if (PyType_Ready(&dirstateItemType) < 0) {
		return;
	}
	/* PyModule_AddObject steals a reference on success, so take one
	 * extra to keep the static type object alive */
	Py_INCREF(&dirstateItemType);
	PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
}
1189 |
|
1159 | |||
/* Verify that the Python interpreter loading this extension matches the
 * one it was compiled against. Only the major/minor version is compared
 * (the top 16 bits of sys.hexversion vs PY_VERSION_HEX).
 * Returns 0 on success, -1 with ImportError (or a propagated error) set
 * on mismatch or failure. */
static int check_python_version(void)
{
	PyObject *sys = PyImport_ImportModule("sys"), *ver;
	long hexversion;
	if (!sys) {
		return -1;
	}
	ver = PyObject_GetAttrString(sys, "hexversion");
	Py_DECREF(sys);
	if (!ver) {
		return -1;
	}
	hexversion = PyInt_AsLong(ver);
	Py_DECREF(ver);
	/* sys.hexversion is a 32-bit number by default, so the -1 case
	 * should only occur in unusual circumstances (e.g. if sys.hexversion
	 * is manually set to an invalid value). */
	if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
		PyErr_Format(PyExc_ImportError,
		             "%s: The Mercurial extension "
		             "modules were compiled with Python " PY_VERSION
		             ", but "
		             "Mercurial is currently using Python with "
		             "sys.hexversion=%ld: "
		             "Python %s\n at: %s",
		             versionerrortext, hexversion, Py_GetVersion(),
		             Py_GetProgramFullPath());
		return -1;
	}
	return 0;
}
1221 |
|
1191 | |||
1222 | #ifdef IS_PY3K |
|
1192 | #ifdef IS_PY3K | |
1223 | static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers", |
|
1193 | static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers", | |
1224 | parsers_doc, -1, methods}; |
|
1194 | parsers_doc, -1, methods}; | |
1225 |
|
1195 | |||
/* Python 3 module entry point: refuse to load on an interpreter version
 * mismatch, then create and populate the module.
 * NOTE(review): PyModule_Create can return NULL on failure, in which
 * case module_init would receive NULL — confirm whether PyModule_Add*
 * tolerates that before hardening. */
PyMODINIT_FUNC PyInit_parsers(void)
{
	PyObject *mod;

	if (check_python_version() == -1)
		return NULL;
	mod = PyModule_Create(&parsers_module);
	module_init(mod);
	return mod;
}
1236 | #else |
|
1206 | #else | |
/* Python 2 module entry point: same flow as PyInit_parsers but using
 * the legacy Py_InitModule3 API, which reports errors via an exception
 * rather than a return value. */
PyMODINIT_FUNC initparsers(void)
{
	PyObject *mod;

	if (check_python_version() == -1) {
		return;
	}
	mod = Py_InitModule3("parsers", methods, parsers_doc);
	module_init(mod);
}
1247 | #endif |
|
1217 | #endif |
@@ -1,82 +1,80 | |||||
1 | /* |
|
1 | /* | |
2 | util.h - utility functions for interfacing with the various python APIs. |
|
2 | util.h - utility functions for interfacing with the various python APIs. | |
3 |
|
3 | |||
4 | This software may be used and distributed according to the terms of |
|
4 | This software may be used and distributed according to the terms of | |
5 | the GNU General Public License, incorporated herein by reference. |
|
5 | the GNU General Public License, incorporated herein by reference. | |
6 | */ |
|
6 | */ | |
7 |
|
7 | |||
8 | #ifndef _HG_UTIL_H_ |
|
8 | #ifndef _HG_UTIL_H_ | |
9 | #define _HG_UTIL_H_ |
|
9 | #define _HG_UTIL_H_ | |
10 |
|
10 | |||
11 | #include "compat.h" |
|
11 | #include "compat.h" | |
12 |
|
12 | |||
13 | #if PY_MAJOR_VERSION >= 3 |
|
13 | #if PY_MAJOR_VERSION >= 3 | |
14 | #define IS_PY3K |
|
14 | #define IS_PY3K | |
15 | #endif |
|
15 | #endif | |
16 |
|
16 | |||
17 | /* helper to switch things like string literal depending on Python version */ |
|
17 | /* helper to switch things like string literal depending on Python version */ | |
18 | #ifdef IS_PY3K |
|
18 | #ifdef IS_PY3K | |
19 | #define PY23(py2, py3) py3 |
|
19 | #define PY23(py2, py3) py3 | |
20 | #else |
|
20 | #else | |
21 | #define PY23(py2, py3) py2 |
|
21 | #define PY23(py2, py3) py2 | |
22 | #endif |
|
22 | #endif | |
23 |
|
23 | |||
/* clang-format off */
/* C-level representation of a single dirstate entry: a packed `flags`
 * byte (see the dirstate_flag_* constants below) plus the stat data
 * recorded for the file. */
typedef struct {
	PyObject_HEAD
	unsigned char flags; /* combination of dirstate_flag_* bits */
	int mode;            /* file mode */
	int size;            /* file size */
	int mtime;           /* modification time */
} dirstateItemObject;
/* clang-format on */

/* Bit meanings within dirstateItemObject.flags. Semantics beyond the
 * names are defined where the flags are set/read (parsers.c and the
 * Python dirstate code) — not visible from this header. */
static const unsigned char dirstate_flag_wc_tracked = 1;
static const unsigned char dirstate_flag_p1_tracked = 1 << 1;
static const unsigned char dirstate_flag_p2_info = 1 << 2;
static const unsigned char dirstate_flag_has_meaningful_data = 1 << 3;
static const unsigned char dirstate_flag_has_meaningful_mtime = 1 << 4;
42 | extern PyTypeObject dirstateItemType; |
|
40 | extern PyTypeObject dirstateItemType; | |
43 | #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType) |
|
41 | #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType) | |
44 |
|
42 | |||
45 | #ifndef MIN |
|
43 | #ifndef MIN | |
46 | #define MIN(a, b) (((a) < (b)) ? (a) : (b)) |
|
44 | #define MIN(a, b) (((a) < (b)) ? (a) : (b)) | |
47 | #endif |
|
45 | #endif | |
48 | /* VC9 doesn't include bool and lacks stdbool.h based on my searching */ |
|
46 | /* VC9 doesn't include bool and lacks stdbool.h based on my searching */ | |
49 | #if defined(_MSC_VER) || __STDC_VERSION__ < 199901L |
|
47 | #if defined(_MSC_VER) || __STDC_VERSION__ < 199901L | |
50 | #define true 1 |
|
48 | #define true 1 | |
51 | #define false 0 |
|
49 | #define false 0 | |
52 | typedef unsigned char bool; |
|
50 | typedef unsigned char bool; | |
53 | #else |
|
51 | #else | |
54 | #include <stdbool.h> |
|
52 | #include <stdbool.h> | |
55 | #endif |
|
53 | #endif | |
56 |
|
54 | |||
/* Create a new dict sized so that `expected_size` insertions do not
 * trigger a resize. Returns a new reference, or NULL on failure. */
static inline PyObject *_dict_new_presized(Py_ssize_t expected_size)
{
	/* _PyDict_NewPresized expects a minused parameter, but it actually
	   creates a dictionary that's the nearest power of two bigger than the
	   parameter. For example, with the initial minused = 1000, the
	   dictionary created has size 1024. Of course in a lot of cases that
	   can be greater than the maximum load factor Python's dict object
	   expects (= 2/3), so as soon as we cross the threshold we'll resize
	   anyway. So create a dictionary that's at least 3/2 the size. */
	return _PyDict_NewPresized(((1 + expected_size) / 2) * 3);
}
68 |
|
66 | |||
69 | /* Convert a PyInt or PyLong to a long. Returns false if there is an |
|
67 | /* Convert a PyInt or PyLong to a long. Returns false if there is an | |
70 | error, in which case an exception will already have been set. */ |
|
68 | error, in which case an exception will already have been set. */ | |
/* Convert a PyInt or PyLong to a long. Returns false if there is an
   error, in which case an exception will already have been set. */
static inline bool pylong_to_long(PyObject *pylong, long *out)
{
	*out = PyLong_AsLong(pylong);
	/* Fast path to avoid hitting PyErr_Occurred if the value was obviously
	 * not an error. PyLong_AsLong returns -1 both for the value -1 and
	 * for errors, so -1 forces the slower PyErr_Occurred check. */
	if (*out != -1) {
		return true;
	}
	return PyErr_Occurred() == NULL;
}
81 |
|
79 | |||
82 | #endif /* _HG_UTIL_H_ */ |
|
80 | #endif /* _HG_UTIL_H_ */ |
@@ -1,805 +1,781 | |||||
1 | # dirstatemap.py |
|
1 | # dirstatemap.py | |
2 | # |
|
2 | # | |
3 | # This software may be used and distributed according to the terms of the |
|
3 | # This software may be used and distributed according to the terms of the | |
4 | # GNU General Public License version 2 or any later version. |
|
4 | # GNU General Public License version 2 or any later version. | |
5 |
|
5 | |||
6 | from __future__ import absolute_import |
|
6 | from __future__ import absolute_import | |
7 |
|
7 | |||
8 | import errno |
|
8 | import errno | |
9 |
|
9 | |||
10 | from .i18n import _ |
|
10 | from .i18n import _ | |
11 |
|
11 | |||
12 | from . import ( |
|
12 | from . import ( | |
13 | error, |
|
13 | error, | |
14 | pathutil, |
|
14 | pathutil, | |
15 | policy, |
|
15 | policy, | |
16 | pycompat, |
|
16 | pycompat, | |
17 | txnutil, |
|
17 | txnutil, | |
18 | util, |
|
18 | util, | |
19 | ) |
|
19 | ) | |
20 |
|
20 | |||
21 | from .dirstateutils import ( |
|
21 | from .dirstateutils import ( | |
22 | docket as docketmod, |
|
22 | docket as docketmod, | |
23 | ) |
|
23 | ) | |
24 |
|
24 | |||
25 | parsers = policy.importmod('parsers') |
|
25 | parsers = policy.importmod('parsers') | |
26 | rustmod = policy.importrust('dirstate') |
|
26 | rustmod = policy.importrust('dirstate') | |
27 |
|
27 | |||
28 | propertycache = util.propertycache |
|
28 | propertycache = util.propertycache | |
29 |
|
29 | |||
30 | if rustmod is None: |
|
30 | if rustmod is None: | |
31 | DirstateItem = parsers.DirstateItem |
|
31 | DirstateItem = parsers.DirstateItem | |
32 | else: |
|
32 | else: | |
33 | DirstateItem = rustmod.DirstateItem |
|
33 | DirstateItem = rustmod.DirstateItem | |
34 |
|
34 | |||
35 | rangemask = 0x7FFFFFFF |
|
35 | rangemask = 0x7FFFFFFF | |
36 |
|
36 | |||
37 |
|
37 | |||
38 | class _dirstatemapcommon(object): |
|
38 | class _dirstatemapcommon(object): | |
39 | """ |
|
39 | """ | |
40 | Methods that are identical for both implementations of the dirstatemap |
|
40 | Methods that are identical for both implementations of the dirstatemap | |
41 | class, with and without Rust extensions enabled. |
|
41 | class, with and without Rust extensions enabled. | |
42 | """ |
|
42 | """ | |
43 |
|
43 | |||
44 | # please pytype |
|
44 | # please pytype | |
45 |
|
45 | |||
46 | _map = None |
|
46 | _map = None | |
47 | copymap = None |
|
47 | copymap = None | |
48 |
|
48 | |||
49 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): |
|
49 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): | |
50 | self._use_dirstate_v2 = use_dirstate_v2 |
|
50 | self._use_dirstate_v2 = use_dirstate_v2 | |
51 | self._nodeconstants = nodeconstants |
|
51 | self._nodeconstants = nodeconstants | |
52 | self._ui = ui |
|
52 | self._ui = ui | |
53 | self._opener = opener |
|
53 | self._opener = opener | |
54 | self._root = root |
|
54 | self._root = root | |
55 | self._filename = b'dirstate' |
|
55 | self._filename = b'dirstate' | |
56 | self._nodelen = 20 # Also update Rust code when changing this! |
|
56 | self._nodelen = 20 # Also update Rust code when changing this! | |
57 | self._parents = None |
|
57 | self._parents = None | |
58 | self._dirtyparents = False |
|
58 | self._dirtyparents = False | |
59 |
|
59 | |||
60 | # for consistent view between _pl() and _read() invocations |
|
60 | # for consistent view between _pl() and _read() invocations | |
61 | self._pendingmode = None |
|
61 | self._pendingmode = None | |
62 |
|
62 | |||
63 | def preload(self): |
|
63 | def preload(self): | |
64 | """Loads the underlying data, if it's not already loaded""" |
|
64 | """Loads the underlying data, if it's not already loaded""" | |
65 | self._map |
|
65 | self._map | |
66 |
|
66 | |||
67 | def get(self, key, default=None): |
|
67 | def get(self, key, default=None): | |
68 | return self._map.get(key, default) |
|
68 | return self._map.get(key, default) | |
69 |
|
69 | |||
70 | def __len__(self): |
|
70 | def __len__(self): | |
71 | return len(self._map) |
|
71 | return len(self._map) | |
72 |
|
72 | |||
73 | def __iter__(self): |
|
73 | def __iter__(self): | |
74 | return iter(self._map) |
|
74 | return iter(self._map) | |
75 |
|
75 | |||
76 | def __contains__(self, key): |
|
76 | def __contains__(self, key): | |
77 | return key in self._map |
|
77 | return key in self._map | |
78 |
|
78 | |||
79 | def __getitem__(self, item): |
|
79 | def __getitem__(self, item): | |
80 | return self._map[item] |
|
80 | return self._map[item] | |
81 |
|
81 | |||
82 | ### sub-class utility method |
|
82 | ### sub-class utility method | |
83 | # |
|
83 | # | |
84 | # Use to allow for generic implementation of some method while still coping |
|
84 | # Use to allow for generic implementation of some method while still coping | |
85 | # with minor difference between implementation. |
|
85 | # with minor difference between implementation. | |
86 |
|
86 | |||
87 | def _dirs_incr(self, filename, old_entry=None): |
|
87 | def _dirs_incr(self, filename, old_entry=None): | |
88 | """incremente the dirstate counter if applicable |
|
88 | """incremente the dirstate counter if applicable | |
89 |
|
89 | |||
90 | This might be a no-op for some subclass who deal with directory |
|
90 | This might be a no-op for some subclass who deal with directory | |
91 | tracking in a different way. |
|
91 | tracking in a different way. | |
92 | """ |
|
92 | """ | |
93 |
|
93 | |||
94 | def _dirs_decr(self, filename, old_entry=None, remove_variant=False): |
|
94 | def _dirs_decr(self, filename, old_entry=None, remove_variant=False): | |
95 | """decremente the dirstate counter if applicable |
|
95 | """decremente the dirstate counter if applicable | |
96 |
|
96 | |||
97 | This might be a no-op for some subclass who deal with directory |
|
97 | This might be a no-op for some subclass who deal with directory | |
98 | tracking in a different way. |
|
98 | tracking in a different way. | |
99 | """ |
|
99 | """ | |
100 |
|
100 | |||
101 | def _refresh_entry(self, f, entry): |
|
101 | def _refresh_entry(self, f, entry): | |
102 | """record updated state of an entry""" |
|
102 | """record updated state of an entry""" | |
103 |
|
103 | |||
104 | def _insert_entry(self, f, entry): |
|
104 | def _insert_entry(self, f, entry): | |
105 | """add a new dirstate entry (or replace an unrelated one) |
|
105 | """add a new dirstate entry (or replace an unrelated one) | |
106 |
|
106 | |||
107 | The fact it is actually new is the responsability of the caller |
|
107 | The fact it is actually new is the responsability of the caller | |
108 | """ |
|
108 | """ | |
109 |
|
109 | |||
110 | def _drop_entry(self, f): |
|
110 | def _drop_entry(self, f): | |
111 | """remove any entry for file f |
|
111 | """remove any entry for file f | |
112 |
|
112 | |||
113 | This should also drop associated copy information |
|
113 | This should also drop associated copy information | |
114 |
|
114 | |||
115 | The fact we actually need to drop it is the responsability of the caller""" |
|
115 | The fact we actually need to drop it is the responsability of the caller""" | |
116 |
|
116 | |||
117 | ### method to manipulate the entries |
|
117 | ### method to manipulate the entries | |
118 |
|
118 | |||
119 | def set_possibly_dirty(self, filename): |
|
119 | def set_possibly_dirty(self, filename): | |
120 | """record that the current state of the file on disk is unknown""" |
|
120 | """record that the current state of the file on disk is unknown""" | |
121 | entry = self[filename] |
|
121 | entry = self[filename] | |
122 | entry.set_possibly_dirty() |
|
122 | entry.set_possibly_dirty() | |
123 | self._refresh_entry(filename, entry) |
|
123 | self._refresh_entry(filename, entry) | |
124 |
|
124 | |||
125 | def set_clean(self, filename, mode, size, mtime): |
|
125 | def set_clean(self, filename, mode, size, mtime): | |
126 | """mark a file as back to a clean state""" |
|
126 | """mark a file as back to a clean state""" | |
127 | entry = self[filename] |
|
127 | entry = self[filename] | |
128 | mtime = mtime & rangemask |
|
128 | mtime = mtime & rangemask | |
129 | size = size & rangemask |
|
129 | size = size & rangemask | |
130 | entry.set_clean(mode, size, mtime) |
|
130 | entry.set_clean(mode, size, mtime) | |
131 | self._refresh_entry(filename, entry) |
|
131 | self._refresh_entry(filename, entry) | |
132 | self.copymap.pop(filename, None) |
|
132 | self.copymap.pop(filename, None) | |
133 |
|
133 | |||
134 | def set_tracked(self, filename): |
|
134 | def set_tracked(self, filename): | |
135 | new = False |
|
135 | new = False | |
136 | entry = self.get(filename) |
|
136 | entry = self.get(filename) | |
137 | if entry is None: |
|
137 | if entry is None: | |
138 | self._dirs_incr(filename) |
|
138 | self._dirs_incr(filename) | |
139 | entry = DirstateItem( |
|
139 | entry = DirstateItem( | |
140 | p1_tracked=False, |
|
|||
141 | p2_tracked=False, |
|
|||
142 | wc_tracked=True, |
|
140 | wc_tracked=True, | |
143 | merged=False, |
|
|||
144 | clean_p1=False, |
|
|||
145 | clean_p2=False, |
|
|||
146 | possibly_dirty=False, |
|
|||
147 | parentfiledata=None, |
|
|||
148 | ) |
|
141 | ) | |
149 |
|
142 | |||
150 | self._insert_entry(filename, entry) |
|
143 | self._insert_entry(filename, entry) | |
151 | new = True |
|
144 | new = True | |
152 | elif not entry.tracked: |
|
145 | elif not entry.tracked: | |
153 | self._dirs_incr(filename, entry) |
|
146 | self._dirs_incr(filename, entry) | |
154 | entry.set_tracked() |
|
147 | entry.set_tracked() | |
155 | self._refresh_entry(filename, entry) |
|
148 | self._refresh_entry(filename, entry) | |
156 | new = True |
|
149 | new = True | |
157 | else: |
|
150 | else: | |
158 | # XXX This is probably overkill for more case, but we need this to |
|
151 | # XXX This is probably overkill for more case, but we need this to | |
159 | # fully replace the `normallookup` call with `set_tracked` one. |
|
152 | # fully replace the `normallookup` call with `set_tracked` one. | |
160 | # Consider smoothing this in the future. |
|
153 | # Consider smoothing this in the future. | |
161 | entry.set_possibly_dirty() |
|
154 | entry.set_possibly_dirty() | |
162 | self._refresh_entry(filename, entry) |
|
155 | self._refresh_entry(filename, entry) | |
163 | return new |
|
156 | return new | |
164 |
|
157 | |||
165 | def set_untracked(self, f): |
|
158 | def set_untracked(self, f): | |
166 | """Mark a file as no longer tracked in the dirstate map""" |
|
159 | """Mark a file as no longer tracked in the dirstate map""" | |
167 | entry = self.get(f) |
|
160 | entry = self.get(f) | |
168 | if entry is None: |
|
161 | if entry is None: | |
169 | return False |
|
162 | return False | |
170 | else: |
|
163 | else: | |
171 | self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added) |
|
164 | self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added) | |
172 | if not entry.merged: |
|
165 | if not entry.merged: | |
173 | self.copymap.pop(f, None) |
|
166 | self.copymap.pop(f, None) | |
174 | entry.set_untracked() |
|
167 | entry.set_untracked() | |
175 | self._refresh_entry(f, entry) |
|
168 | self._refresh_entry(f, entry) | |
176 | return True |
|
169 | return True | |
177 |
|
170 | |||
178 | def reset_state( |
|
171 | def reset_state( | |
179 | self, |
|
172 | self, | |
180 | filename, |
|
173 | filename, | |
181 | wc_tracked=False, |
|
174 | wc_tracked=False, | |
182 | p1_tracked=False, |
|
175 | p1_tracked=False, | |
183 | p2_tracked=False, |
|
176 | p2_tracked=False, | |
184 | merged=False, |
|
177 | merged=False, | |
185 | clean_p1=False, |
|
178 | clean_p1=False, | |
186 | clean_p2=False, |
|
179 | clean_p2=False, | |
187 | possibly_dirty=False, |
|
180 | possibly_dirty=False, | |
188 | parentfiledata=None, |
|
181 | parentfiledata=None, | |
189 | ): |
|
182 | ): | |
190 | """Set a entry to a given state, diregarding all previous state |
|
183 | """Set a entry to a given state, diregarding all previous state | |
191 |
|
184 | |||
192 | This is to be used by the part of the dirstate API dedicated to |
|
185 | This is to be used by the part of the dirstate API dedicated to | |
193 | adjusting the dirstate after a update/merge. |
|
186 | adjusting the dirstate after a update/merge. | |
194 |
|
187 | |||
195 | note: calling this might result to no entry existing at all if the |
|
188 | note: calling this might result to no entry existing at all if the | |
196 | dirstate map does not see any point at having one for this file |
|
189 | dirstate map does not see any point at having one for this file | |
197 | anymore. |
|
190 | anymore. | |
198 | """ |
|
191 | """ | |
199 | if merged and (clean_p1 or clean_p2): |
|
192 | if merged and (clean_p1 or clean_p2): | |
200 | msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`' |
|
193 | msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`' | |
201 | raise error.ProgrammingError(msg) |
|
194 | raise error.ProgrammingError(msg) | |
202 | # copy information are now outdated |
|
195 | # copy information are now outdated | |
203 | # (maybe new information should be in directly passed to this function) |
|
196 | # (maybe new information should be in directly passed to this function) | |
204 | self.copymap.pop(filename, None) |
|
197 | self.copymap.pop(filename, None) | |
205 |
|
198 | |||
206 | if not (p1_tracked or p2_tracked or wc_tracked): |
|
199 | if not (p1_tracked or p2_tracked or wc_tracked): | |
207 | old_entry = self._map.get(filename) |
|
200 | old_entry = self._map.get(filename) | |
208 | self._drop_entry(filename) |
|
201 | self._drop_entry(filename) | |
209 | self._dirs_decr(filename, old_entry=old_entry) |
|
202 | self._dirs_decr(filename, old_entry=old_entry) | |
210 | return |
|
203 | return | |
211 | elif merged: |
|
204 | ||
212 | pass |
|
205 | p2_info = merged or clean_p2 | |
213 | elif not (p1_tracked or p2_tracked) and wc_tracked: |
|
206 | if merged: | |
214 | pass # file is added, nothing special to adjust |
|
207 | assert p1_tracked | |
215 | elif (p1_tracked or p2_tracked) and not wc_tracked: |
|
208 | ||
216 | pass |
|
209 | has_meaningful_mtime = not possibly_dirty | |
217 | elif clean_p2 and wc_tracked: |
|
|||
218 | pass |
|
|||
219 | elif not p1_tracked and p2_tracked and wc_tracked: |
|
|||
220 | clean_p2 = True |
|
|||
221 | elif possibly_dirty: |
|
|||
222 | pass |
|
|||
223 | elif wc_tracked: |
|
|||
224 | # this is a "normal" file |
|
|||
225 | if parentfiledata is None: |
|
|||
226 | msg = b'failed to pass parentfiledata for a normal file: %s' |
|
|||
227 | msg %= filename |
|
|||
228 | raise error.ProgrammingError(msg) |
|
|||
229 | else: |
|
|||
230 | assert False, 'unreachable' |
|
|||
231 |
|
210 | |||
232 | old_entry = self._map.get(filename) |
|
211 | old_entry = self._map.get(filename) | |
233 | self._dirs_incr(filename, old_entry) |
|
212 | self._dirs_incr(filename, old_entry) | |
234 | entry = DirstateItem( |
|
213 | entry = DirstateItem( | |
235 | wc_tracked=wc_tracked, |
|
214 | wc_tracked=wc_tracked, | |
236 | p1_tracked=p1_tracked, |
|
215 | p1_tracked=p1_tracked, | |
237 |
p2_ |
|
216 | p2_info=p2_info, | |
238 | merged=merged, |
|
217 | has_meaningful_mtime=has_meaningful_mtime, | |
239 | clean_p1=clean_p1, |
|
|||
240 | clean_p2=clean_p2, |
|
|||
241 | possibly_dirty=possibly_dirty, |
|
|||
242 | parentfiledata=parentfiledata, |
|
218 | parentfiledata=parentfiledata, | |
243 | ) |
|
219 | ) | |
244 | self._insert_entry(filename, entry) |
|
220 | self._insert_entry(filename, entry) | |
245 |
|
221 | |||
246 |
|
222 | |||
247 | class dirstatemap(_dirstatemapcommon): |
|
223 | class dirstatemap(_dirstatemapcommon): | |
248 | """Map encapsulating the dirstate's contents. |
|
224 | """Map encapsulating the dirstate's contents. | |
249 |
|
225 | |||
250 | The dirstate contains the following state: |
|
226 | The dirstate contains the following state: | |
251 |
|
227 | |||
252 | - `identity` is the identity of the dirstate file, which can be used to |
|
228 | - `identity` is the identity of the dirstate file, which can be used to | |
253 | detect when changes have occurred to the dirstate file. |
|
229 | detect when changes have occurred to the dirstate file. | |
254 |
|
230 | |||
255 | - `parents` is a pair containing the parents of the working copy. The |
|
231 | - `parents` is a pair containing the parents of the working copy. The | |
256 | parents are updated by calling `setparents`. |
|
232 | parents are updated by calling `setparents`. | |
257 |
|
233 | |||
258 | - the state map maps filenames to tuples of (state, mode, size, mtime), |
|
234 | - the state map maps filenames to tuples of (state, mode, size, mtime), | |
259 | where state is a single character representing 'normal', 'added', |
|
235 | where state is a single character representing 'normal', 'added', | |
260 | 'removed', or 'merged'. It is read by treating the dirstate as a |
|
236 | 'removed', or 'merged'. It is read by treating the dirstate as a | |
261 | dict. File state is updated by calling various methods (see each |
|
237 | dict. File state is updated by calling various methods (see each | |
262 | documentation for details): |
|
238 | documentation for details): | |
263 |
|
239 | |||
264 | - `reset_state`, |
|
240 | - `reset_state`, | |
265 | - `set_tracked` |
|
241 | - `set_tracked` | |
266 | - `set_untracked` |
|
242 | - `set_untracked` | |
267 | - `set_clean` |
|
243 | - `set_clean` | |
268 | - `set_possibly_dirty` |
|
244 | - `set_possibly_dirty` | |
269 |
|
245 | |||
270 | - `copymap` maps destination filenames to their source filename. |
|
246 | - `copymap` maps destination filenames to their source filename. | |
271 |
|
247 | |||
272 | The dirstate also provides the following views onto the state: |
|
248 | The dirstate also provides the following views onto the state: | |
273 |
|
249 | |||
274 | - `filefoldmap` is a dict mapping normalized filenames to the denormalized |
|
250 | - `filefoldmap` is a dict mapping normalized filenames to the denormalized | |
275 | form that they appear as in the dirstate. |
|
251 | form that they appear as in the dirstate. | |
276 |
|
252 | |||
277 | - `dirfoldmap` is a dict mapping normalized directory names to the |
|
253 | - `dirfoldmap` is a dict mapping normalized directory names to the | |
278 | denormalized form that they appear as in the dirstate. |
|
254 | denormalized form that they appear as in the dirstate. | |
279 | """ |
|
255 | """ | |
280 |
|
256 | |||
281 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): |
|
257 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): | |
282 | super(dirstatemap, self).__init__( |
|
258 | super(dirstatemap, self).__init__( | |
283 | ui, opener, root, nodeconstants, use_dirstate_v2 |
|
259 | ui, opener, root, nodeconstants, use_dirstate_v2 | |
284 | ) |
|
260 | ) | |
285 | if self._use_dirstate_v2: |
|
261 | if self._use_dirstate_v2: | |
286 | msg = "Dirstate V2 not supportedi" |
|
262 | msg = "Dirstate V2 not supportedi" | |
287 | msg += "(should have detected unsupported requirement)" |
|
263 | msg += "(should have detected unsupported requirement)" | |
288 | raise error.ProgrammingError(msg) |
|
264 | raise error.ProgrammingError(msg) | |
289 |
|
265 | |||
290 | ### Core data storage and access |
|
266 | ### Core data storage and access | |
291 |
|
267 | |||
292 | @propertycache |
|
268 | @propertycache | |
293 | def _map(self): |
|
269 | def _map(self): | |
294 | self._map = {} |
|
270 | self._map = {} | |
295 | self.read() |
|
271 | self.read() | |
296 | return self._map |
|
272 | return self._map | |
297 |
|
273 | |||
298 | @propertycache |
|
274 | @propertycache | |
299 | def copymap(self): |
|
275 | def copymap(self): | |
300 | self.copymap = {} |
|
276 | self.copymap = {} | |
301 | self._map |
|
277 | self._map | |
302 | return self.copymap |
|
278 | return self.copymap | |
303 |
|
279 | |||
304 | def clear(self): |
|
280 | def clear(self): | |
305 | self._map.clear() |
|
281 | self._map.clear() | |
306 | self.copymap.clear() |
|
282 | self.copymap.clear() | |
307 | self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid) |
|
283 | self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid) | |
308 | util.clearcachedproperty(self, b"_dirs") |
|
284 | util.clearcachedproperty(self, b"_dirs") | |
309 | util.clearcachedproperty(self, b"_alldirs") |
|
285 | util.clearcachedproperty(self, b"_alldirs") | |
310 | util.clearcachedproperty(self, b"filefoldmap") |
|
286 | util.clearcachedproperty(self, b"filefoldmap") | |
311 | util.clearcachedproperty(self, b"dirfoldmap") |
|
287 | util.clearcachedproperty(self, b"dirfoldmap") | |
312 |
|
288 | |||
313 | def items(self): |
|
289 | def items(self): | |
314 | return pycompat.iteritems(self._map) |
|
290 | return pycompat.iteritems(self._map) | |
315 |
|
291 | |||
316 | # forward for python2,3 compat |
|
292 | # forward for python2,3 compat | |
317 | iteritems = items |
|
293 | iteritems = items | |
318 |
|
294 | |||
319 | def debug_iter(self, all): |
|
295 | def debug_iter(self, all): | |
320 | """ |
|
296 | """ | |
321 | Return an iterator of (filename, state, mode, size, mtime) tuples |
|
297 | Return an iterator of (filename, state, mode, size, mtime) tuples | |
322 |
|
298 | |||
323 | `all` is unused when Rust is not enabled |
|
299 | `all` is unused when Rust is not enabled | |
324 | """ |
|
300 | """ | |
325 | for (filename, item) in self.items(): |
|
301 | for (filename, item) in self.items(): | |
326 | yield (filename, item.state, item.mode, item.size, item.mtime) |
|
302 | yield (filename, item.state, item.mode, item.size, item.mtime) | |
327 |
|
303 | |||
328 | def keys(self): |
|
304 | def keys(self): | |
329 | return self._map.keys() |
|
305 | return self._map.keys() | |
330 |
|
306 | |||
331 | ### reading/setting parents |
|
307 | ### reading/setting parents | |
332 |
|
308 | |||
333 | def parents(self): |
|
309 | def parents(self): | |
334 | if not self._parents: |
|
310 | if not self._parents: | |
335 | try: |
|
311 | try: | |
336 | fp = self._opendirstatefile() |
|
312 | fp = self._opendirstatefile() | |
337 | st = fp.read(2 * self._nodelen) |
|
313 | st = fp.read(2 * self._nodelen) | |
338 | fp.close() |
|
314 | fp.close() | |
339 | except IOError as err: |
|
315 | except IOError as err: | |
340 | if err.errno != errno.ENOENT: |
|
316 | if err.errno != errno.ENOENT: | |
341 | raise |
|
317 | raise | |
342 | # File doesn't exist, so the current state is empty |
|
318 | # File doesn't exist, so the current state is empty | |
343 | st = b'' |
|
319 | st = b'' | |
344 |
|
320 | |||
345 | l = len(st) |
|
321 | l = len(st) | |
346 | if l == self._nodelen * 2: |
|
322 | if l == self._nodelen * 2: | |
347 | self._parents = ( |
|
323 | self._parents = ( | |
348 | st[: self._nodelen], |
|
324 | st[: self._nodelen], | |
349 | st[self._nodelen : 2 * self._nodelen], |
|
325 | st[self._nodelen : 2 * self._nodelen], | |
350 | ) |
|
326 | ) | |
351 | elif l == 0: |
|
327 | elif l == 0: | |
352 | self._parents = ( |
|
328 | self._parents = ( | |
353 | self._nodeconstants.nullid, |
|
329 | self._nodeconstants.nullid, | |
354 | self._nodeconstants.nullid, |
|
330 | self._nodeconstants.nullid, | |
355 | ) |
|
331 | ) | |
356 | else: |
|
332 | else: | |
357 | raise error.Abort( |
|
333 | raise error.Abort( | |
358 | _(b'working directory state appears damaged!') |
|
334 | _(b'working directory state appears damaged!') | |
359 | ) |
|
335 | ) | |
360 |
|
336 | |||
361 | return self._parents |
|
337 | return self._parents | |
362 |
|
338 | |||
363 | def setparents(self, p1, p2, fold_p2=False): |
|
339 | def setparents(self, p1, p2, fold_p2=False): | |
364 | self._parents = (p1, p2) |
|
340 | self._parents = (p1, p2) | |
365 | self._dirtyparents = True |
|
341 | self._dirtyparents = True | |
366 | copies = {} |
|
342 | copies = {} | |
367 | if fold_p2: |
|
343 | if fold_p2: | |
368 | for f, s in pycompat.iteritems(self._map): |
|
344 | for f, s in pycompat.iteritems(self._map): | |
369 | # Discard "merged" markers when moving away from a merge state |
|
345 | # Discard "merged" markers when moving away from a merge state | |
370 | if s.merged or s.from_p2: |
|
346 | if s.merged or s.from_p2: | |
371 | source = self.copymap.pop(f, None) |
|
347 | source = self.copymap.pop(f, None) | |
372 | if source: |
|
348 | if source: | |
373 | copies[f] = source |
|
349 | copies[f] = source | |
374 | s.drop_merge_data() |
|
350 | s.drop_merge_data() | |
375 | return copies |
|
351 | return copies | |
376 |
|
352 | |||
377 | ### disk interaction |
|
353 | ### disk interaction | |
378 |
|
354 | |||
379 | def read(self): |
|
355 | def read(self): | |
380 | # ignore HG_PENDING because identity is used only for writing |
|
356 | # ignore HG_PENDING because identity is used only for writing | |
381 | self.identity = util.filestat.frompath( |
|
357 | self.identity = util.filestat.frompath( | |
382 | self._opener.join(self._filename) |
|
358 | self._opener.join(self._filename) | |
383 | ) |
|
359 | ) | |
384 |
|
360 | |||
385 | try: |
|
361 | try: | |
386 | fp = self._opendirstatefile() |
|
362 | fp = self._opendirstatefile() | |
387 | try: |
|
363 | try: | |
388 | st = fp.read() |
|
364 | st = fp.read() | |
389 | finally: |
|
365 | finally: | |
390 | fp.close() |
|
366 | fp.close() | |
391 | except IOError as err: |
|
367 | except IOError as err: | |
392 | if err.errno != errno.ENOENT: |
|
368 | if err.errno != errno.ENOENT: | |
393 | raise |
|
369 | raise | |
394 | return |
|
370 | return | |
395 | if not st: |
|
371 | if not st: | |
396 | return |
|
372 | return | |
397 |
|
373 | |||
398 | if util.safehasattr(parsers, b'dict_new_presized'): |
|
374 | if util.safehasattr(parsers, b'dict_new_presized'): | |
399 | # Make an estimate of the number of files in the dirstate based on |
|
375 | # Make an estimate of the number of files in the dirstate based on | |
400 | # its size. This trades wasting some memory for avoiding costly |
|
376 | # its size. This trades wasting some memory for avoiding costly | |
401 | # resizes. Each entry have a prefix of 17 bytes followed by one or |
|
377 | # resizes. Each entry have a prefix of 17 bytes followed by one or | |
402 | # two path names. Studies on various large-scale real-world repositories |
|
378 | # two path names. Studies on various large-scale real-world repositories | |
403 | # found 54 bytes a reasonable upper limit for the average path names. |
|
379 | # found 54 bytes a reasonable upper limit for the average path names. | |
404 | # Copy entries are ignored for the sake of this estimate. |
|
380 | # Copy entries are ignored for the sake of this estimate. | |
405 | self._map = parsers.dict_new_presized(len(st) // 71) |
|
381 | self._map = parsers.dict_new_presized(len(st) // 71) | |
406 |
|
382 | |||
407 | # Python's garbage collector triggers a GC each time a certain number |
|
383 | # Python's garbage collector triggers a GC each time a certain number | |
408 | # of container objects (the number being defined by |
|
384 | # of container objects (the number being defined by | |
409 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple |
|
385 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple | |
410 | # for each file in the dirstate. The C version then immediately marks |
|
386 | # for each file in the dirstate. The C version then immediately marks | |
411 | # them as not to be tracked by the collector. However, this has no |
|
387 | # them as not to be tracked by the collector. However, this has no | |
412 | # effect on when GCs are triggered, only on what objects the GC looks |
|
388 | # effect on when GCs are triggered, only on what objects the GC looks | |
413 | # into. This means that O(number of files) GCs are unavoidable. |
|
389 | # into. This means that O(number of files) GCs are unavoidable. | |
414 | # Depending on when in the process's lifetime the dirstate is parsed, |
|
390 | # Depending on when in the process's lifetime the dirstate is parsed, | |
415 | # this can get very expensive. As a workaround, disable GC while |
|
391 | # this can get very expensive. As a workaround, disable GC while | |
416 | # parsing the dirstate. |
|
392 | # parsing the dirstate. | |
417 | # |
|
393 | # | |
418 | # (we cannot decorate the function directly since it is in a C module) |
|
394 | # (we cannot decorate the function directly since it is in a C module) | |
419 | parse_dirstate = util.nogc(parsers.parse_dirstate) |
|
395 | parse_dirstate = util.nogc(parsers.parse_dirstate) | |
420 | p = parse_dirstate(self._map, self.copymap, st) |
|
396 | p = parse_dirstate(self._map, self.copymap, st) | |
421 | if not self._dirtyparents: |
|
397 | if not self._dirtyparents: | |
422 | self.setparents(*p) |
|
398 | self.setparents(*p) | |
423 |
|
399 | |||
424 | # Avoid excess attribute lookups by fast pathing certain checks |
|
400 | # Avoid excess attribute lookups by fast pathing certain checks | |
425 | self.__contains__ = self._map.__contains__ |
|
401 | self.__contains__ = self._map.__contains__ | |
426 | self.__getitem__ = self._map.__getitem__ |
|
402 | self.__getitem__ = self._map.__getitem__ | |
427 | self.get = self._map.get |
|
403 | self.get = self._map.get | |
428 |
|
404 | |||
429 | def write(self, _tr, st, now): |
|
405 | def write(self, _tr, st, now): | |
430 | d = parsers.pack_dirstate(self._map, self.copymap, self.parents(), now) |
|
406 | d = parsers.pack_dirstate(self._map, self.copymap, self.parents(), now) | |
431 | st.write(d) |
|
407 | st.write(d) | |
432 | st.close() |
|
408 | st.close() | |
433 | self._dirtyparents = False |
|
409 | self._dirtyparents = False | |
434 |
|
410 | |||
435 | def _opendirstatefile(self): |
|
411 | def _opendirstatefile(self): | |
436 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) |
|
412 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) | |
437 | if self._pendingmode is not None and self._pendingmode != mode: |
|
413 | if self._pendingmode is not None and self._pendingmode != mode: | |
438 | fp.close() |
|
414 | fp.close() | |
439 | raise error.Abort( |
|
415 | raise error.Abort( | |
440 | _(b'working directory state may be changed parallelly') |
|
416 | _(b'working directory state may be changed parallelly') | |
441 | ) |
|
417 | ) | |
442 | self._pendingmode = mode |
|
418 | self._pendingmode = mode | |
443 | return fp |
|
419 | return fp | |
444 |
|
420 | |||
445 | @propertycache |
|
421 | @propertycache | |
446 | def identity(self): |
|
422 | def identity(self): | |
447 | self._map |
|
423 | self._map | |
448 | return self.identity |
|
424 | return self.identity | |
449 |
|
425 | |||
450 | ### code related to maintaining and accessing "extra" property |
|
426 | ### code related to maintaining and accessing "extra" property | |
451 | # (e.g. "has_dir") |
|
427 | # (e.g. "has_dir") | |
452 |
|
428 | |||
453 | def _dirs_incr(self, filename, old_entry=None): |
|
429 | def _dirs_incr(self, filename, old_entry=None): | |
454 | """incremente the dirstate counter if applicable""" |
|
430 | """incremente the dirstate counter if applicable""" | |
455 | if ( |
|
431 | if ( | |
456 | old_entry is None or old_entry.removed |
|
432 | old_entry is None or old_entry.removed | |
457 | ) and "_dirs" in self.__dict__: |
|
433 | ) and "_dirs" in self.__dict__: | |
458 | self._dirs.addpath(filename) |
|
434 | self._dirs.addpath(filename) | |
459 | if old_entry is None and "_alldirs" in self.__dict__: |
|
435 | if old_entry is None and "_alldirs" in self.__dict__: | |
460 | self._alldirs.addpath(filename) |
|
436 | self._alldirs.addpath(filename) | |
461 |
|
437 | |||
462 | def _dirs_decr(self, filename, old_entry=None, remove_variant=False): |
|
438 | def _dirs_decr(self, filename, old_entry=None, remove_variant=False): | |
463 | """decremente the dirstate counter if applicable""" |
|
439 | """decremente the dirstate counter if applicable""" | |
464 | if old_entry is not None: |
|
440 | if old_entry is not None: | |
465 | if "_dirs" in self.__dict__ and not old_entry.removed: |
|
441 | if "_dirs" in self.__dict__ and not old_entry.removed: | |
466 | self._dirs.delpath(filename) |
|
442 | self._dirs.delpath(filename) | |
467 | if "_alldirs" in self.__dict__ and not remove_variant: |
|
443 | if "_alldirs" in self.__dict__ and not remove_variant: | |
468 | self._alldirs.delpath(filename) |
|
444 | self._alldirs.delpath(filename) | |
469 | elif remove_variant and "_alldirs" in self.__dict__: |
|
445 | elif remove_variant and "_alldirs" in self.__dict__: | |
470 | self._alldirs.addpath(filename) |
|
446 | self._alldirs.addpath(filename) | |
471 | if "filefoldmap" in self.__dict__: |
|
447 | if "filefoldmap" in self.__dict__: | |
472 | normed = util.normcase(filename) |
|
448 | normed = util.normcase(filename) | |
473 | self.filefoldmap.pop(normed, None) |
|
449 | self.filefoldmap.pop(normed, None) | |
474 |
|
450 | |||
475 | @propertycache |
|
451 | @propertycache | |
476 | def filefoldmap(self): |
|
452 | def filefoldmap(self): | |
477 | """Returns a dictionary mapping normalized case paths to their |
|
453 | """Returns a dictionary mapping normalized case paths to their | |
478 | non-normalized versions. |
|
454 | non-normalized versions. | |
479 | """ |
|
455 | """ | |
480 | try: |
|
456 | try: | |
481 | makefilefoldmap = parsers.make_file_foldmap |
|
457 | makefilefoldmap = parsers.make_file_foldmap | |
482 | except AttributeError: |
|
458 | except AttributeError: | |
483 | pass |
|
459 | pass | |
484 | else: |
|
460 | else: | |
485 | return makefilefoldmap( |
|
461 | return makefilefoldmap( | |
486 | self._map, util.normcasespec, util.normcasefallback |
|
462 | self._map, util.normcasespec, util.normcasefallback | |
487 | ) |
|
463 | ) | |
488 |
|
464 | |||
489 | f = {} |
|
465 | f = {} | |
490 | normcase = util.normcase |
|
466 | normcase = util.normcase | |
491 | for name, s in pycompat.iteritems(self._map): |
|
467 | for name, s in pycompat.iteritems(self._map): | |
492 | if not s.removed: |
|
468 | if not s.removed: | |
493 | f[normcase(name)] = name |
|
469 | f[normcase(name)] = name | |
494 | f[b'.'] = b'.' # prevents useless util.fspath() invocation |
|
470 | f[b'.'] = b'.' # prevents useless util.fspath() invocation | |
495 | return f |
|
471 | return f | |
496 |
|
472 | |||
497 | @propertycache |
|
473 | @propertycache | |
498 | def dirfoldmap(self): |
|
474 | def dirfoldmap(self): | |
499 | f = {} |
|
475 | f = {} | |
500 | normcase = util.normcase |
|
476 | normcase = util.normcase | |
501 | for name in self._dirs: |
|
477 | for name in self._dirs: | |
502 | f[normcase(name)] = name |
|
478 | f[normcase(name)] = name | |
503 | return f |
|
479 | return f | |
504 |
|
480 | |||
505 | def hastrackeddir(self, d): |
|
481 | def hastrackeddir(self, d): | |
506 | """ |
|
482 | """ | |
507 | Returns True if the dirstate contains a tracked (not removed) file |
|
483 | Returns True if the dirstate contains a tracked (not removed) file | |
508 | in this directory. |
|
484 | in this directory. | |
509 | """ |
|
485 | """ | |
510 | return d in self._dirs |
|
486 | return d in self._dirs | |
511 |
|
487 | |||
512 | def hasdir(self, d): |
|
488 | def hasdir(self, d): | |
513 | """ |
|
489 | """ | |
514 | Returns True if the dirstate contains a file (tracked or removed) |
|
490 | Returns True if the dirstate contains a file (tracked or removed) | |
515 | in this directory. |
|
491 | in this directory. | |
516 | """ |
|
492 | """ | |
517 | return d in self._alldirs |
|
493 | return d in self._alldirs | |
518 |
|
494 | |||
519 | @propertycache |
|
495 | @propertycache | |
520 | def _dirs(self): |
|
496 | def _dirs(self): | |
521 | return pathutil.dirs(self._map, only_tracked=True) |
|
497 | return pathutil.dirs(self._map, only_tracked=True) | |
522 |
|
498 | |||
523 | @propertycache |
|
499 | @propertycache | |
524 | def _alldirs(self): |
|
500 | def _alldirs(self): | |
525 | return pathutil.dirs(self._map) |
|
501 | return pathutil.dirs(self._map) | |
526 |
|
502 | |||
527 | ### code related to manipulation of entries and copy-sources |
|
503 | ### code related to manipulation of entries and copy-sources | |
528 |
|
504 | |||
529 | def _refresh_entry(self, f, entry): |
|
505 | def _refresh_entry(self, f, entry): | |
530 | if not entry.any_tracked: |
|
506 | if not entry.any_tracked: | |
531 | self._map.pop(f, None) |
|
507 | self._map.pop(f, None) | |
532 |
|
508 | |||
533 | def _insert_entry(self, f, entry): |
|
509 | def _insert_entry(self, f, entry): | |
534 | self._map[f] = entry |
|
510 | self._map[f] = entry | |
535 |
|
511 | |||
536 | def _drop_entry(self, f): |
|
512 | def _drop_entry(self, f): | |
537 | self._map.pop(f, None) |
|
513 | self._map.pop(f, None) | |
538 | self.copymap.pop(f, None) |
|
514 | self.copymap.pop(f, None) | |
539 |
|
515 | |||
540 |
|
516 | |||
541 | if rustmod is not None: |
|
517 | if rustmod is not None: | |
542 |
|
518 | |||
543 | class dirstatemap(_dirstatemapcommon): |
|
519 | class dirstatemap(_dirstatemapcommon): | |
544 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): |
|
520 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): | |
545 | super(dirstatemap, self).__init__( |
|
521 | super(dirstatemap, self).__init__( | |
546 | ui, opener, root, nodeconstants, use_dirstate_v2 |
|
522 | ui, opener, root, nodeconstants, use_dirstate_v2 | |
547 | ) |
|
523 | ) | |
548 | self._docket = None |
|
524 | self._docket = None | |
549 |
|
525 | |||
550 | ### Core data storage and access |
|
526 | ### Core data storage and access | |
551 |
|
527 | |||
552 | @property |
|
528 | @property | |
553 | def docket(self): |
|
529 | def docket(self): | |
554 | if not self._docket: |
|
530 | if not self._docket: | |
555 | if not self._use_dirstate_v2: |
|
531 | if not self._use_dirstate_v2: | |
556 | raise error.ProgrammingError( |
|
532 | raise error.ProgrammingError( | |
557 | b'dirstate only has a docket in v2 format' |
|
533 | b'dirstate only has a docket in v2 format' | |
558 | ) |
|
534 | ) | |
559 | self._docket = docketmod.DirstateDocket.parse( |
|
535 | self._docket = docketmod.DirstateDocket.parse( | |
560 | self._readdirstatefile(), self._nodeconstants |
|
536 | self._readdirstatefile(), self._nodeconstants | |
561 | ) |
|
537 | ) | |
562 | return self._docket |
|
538 | return self._docket | |
563 |
|
539 | |||
564 | @propertycache |
|
540 | @propertycache | |
565 | def _map(self): |
|
541 | def _map(self): | |
566 | """ |
|
542 | """ | |
567 | Fills the Dirstatemap when called. |
|
543 | Fills the Dirstatemap when called. | |
568 | """ |
|
544 | """ | |
569 | # ignore HG_PENDING because identity is used only for writing |
|
545 | # ignore HG_PENDING because identity is used only for writing | |
570 | self.identity = util.filestat.frompath( |
|
546 | self.identity = util.filestat.frompath( | |
571 | self._opener.join(self._filename) |
|
547 | self._opener.join(self._filename) | |
572 | ) |
|
548 | ) | |
573 |
|
549 | |||
574 | if self._use_dirstate_v2: |
|
550 | if self._use_dirstate_v2: | |
575 | if self.docket.uuid: |
|
551 | if self.docket.uuid: | |
576 | # TODO: use mmap when possible |
|
552 | # TODO: use mmap when possible | |
577 | data = self._opener.read(self.docket.data_filename()) |
|
553 | data = self._opener.read(self.docket.data_filename()) | |
578 | else: |
|
554 | else: | |
579 | data = b'' |
|
555 | data = b'' | |
580 | self._map = rustmod.DirstateMap.new_v2( |
|
556 | self._map = rustmod.DirstateMap.new_v2( | |
581 | data, self.docket.data_size, self.docket.tree_metadata |
|
557 | data, self.docket.data_size, self.docket.tree_metadata | |
582 | ) |
|
558 | ) | |
583 | parents = self.docket.parents |
|
559 | parents = self.docket.parents | |
584 | else: |
|
560 | else: | |
585 | self._map, parents = rustmod.DirstateMap.new_v1( |
|
561 | self._map, parents = rustmod.DirstateMap.new_v1( | |
586 | self._readdirstatefile() |
|
562 | self._readdirstatefile() | |
587 | ) |
|
563 | ) | |
588 |
|
564 | |||
589 | if parents and not self._dirtyparents: |
|
565 | if parents and not self._dirtyparents: | |
590 | self.setparents(*parents) |
|
566 | self.setparents(*parents) | |
591 |
|
567 | |||
592 | self.__contains__ = self._map.__contains__ |
|
568 | self.__contains__ = self._map.__contains__ | |
593 | self.__getitem__ = self._map.__getitem__ |
|
569 | self.__getitem__ = self._map.__getitem__ | |
594 | self.get = self._map.get |
|
570 | self.get = self._map.get | |
595 | return self._map |
|
571 | return self._map | |
596 |
|
572 | |||
597 | @property |
|
573 | @property | |
598 | def copymap(self): |
|
574 | def copymap(self): | |
599 | return self._map.copymap() |
|
575 | return self._map.copymap() | |
600 |
|
576 | |||
601 | def debug_iter(self, all): |
|
577 | def debug_iter(self, all): | |
602 | """ |
|
578 | """ | |
603 | Return an iterator of (filename, state, mode, size, mtime) tuples |
|
579 | Return an iterator of (filename, state, mode, size, mtime) tuples | |
604 |
|
580 | |||
605 | `all`: also include with `state == b' '` dirstate tree nodes that |
|
581 | `all`: also include with `state == b' '` dirstate tree nodes that | |
606 | don't have an associated `DirstateItem`. |
|
582 | don't have an associated `DirstateItem`. | |
607 |
|
583 | |||
608 | """ |
|
584 | """ | |
609 | return self._map.debug_iter(all) |
|
585 | return self._map.debug_iter(all) | |
610 |
|
586 | |||
611 | def clear(self): |
|
587 | def clear(self): | |
612 | self._map.clear() |
|
588 | self._map.clear() | |
613 | self.setparents( |
|
589 | self.setparents( | |
614 | self._nodeconstants.nullid, self._nodeconstants.nullid |
|
590 | self._nodeconstants.nullid, self._nodeconstants.nullid | |
615 | ) |
|
591 | ) | |
616 | util.clearcachedproperty(self, b"_dirs") |
|
592 | util.clearcachedproperty(self, b"_dirs") | |
617 | util.clearcachedproperty(self, b"_alldirs") |
|
593 | util.clearcachedproperty(self, b"_alldirs") | |
618 | util.clearcachedproperty(self, b"dirfoldmap") |
|
594 | util.clearcachedproperty(self, b"dirfoldmap") | |
619 |
|
595 | |||
620 | def items(self): |
|
596 | def items(self): | |
621 | return self._map.items() |
|
597 | return self._map.items() | |
622 |
|
598 | |||
623 | # forward for python2,3 compat |
|
599 | # forward for python2,3 compat | |
624 | iteritems = items |
|
600 | iteritems = items | |
625 |
|
601 | |||
626 | def keys(self): |
|
602 | def keys(self): | |
627 | return iter(self._map) |
|
603 | return iter(self._map) | |
628 |
|
604 | |||
629 | ### reading/setting parents |
|
605 | ### reading/setting parents | |
630 |
|
606 | |||
631 | def setparents(self, p1, p2, fold_p2=False): |
|
607 | def setparents(self, p1, p2, fold_p2=False): | |
632 | self._parents = (p1, p2) |
|
608 | self._parents = (p1, p2) | |
633 | self._dirtyparents = True |
|
609 | self._dirtyparents = True | |
634 | copies = {} |
|
610 | copies = {} | |
635 | if fold_p2: |
|
611 | if fold_p2: | |
636 | # Collect into an intermediate list to avoid a `RuntimeError` |
|
612 | # Collect into an intermediate list to avoid a `RuntimeError` | |
637 | # exception due to mutation during iteration. |
|
613 | # exception due to mutation during iteration. | |
638 | # TODO: move this the whole loop to Rust where `iter_mut` |
|
614 | # TODO: move this the whole loop to Rust where `iter_mut` | |
639 | # enables in-place mutation of elements of a collection while |
|
615 | # enables in-place mutation of elements of a collection while | |
640 | # iterating it, without mutating the collection itself. |
|
616 | # iterating it, without mutating the collection itself. | |
641 | files_with_p2_info = [ |
|
617 | files_with_p2_info = [ | |
642 | f for f, s in self._map.items() if s.merged or s.from_p2 |
|
618 | f for f, s in self._map.items() if s.merged or s.from_p2 | |
643 | ] |
|
619 | ] | |
644 | rust_map = self._map |
|
620 | rust_map = self._map | |
645 | for f in files_with_p2_info: |
|
621 | for f in files_with_p2_info: | |
646 | e = rust_map.get(f) |
|
622 | e = rust_map.get(f) | |
647 | source = self.copymap.pop(f, None) |
|
623 | source = self.copymap.pop(f, None) | |
648 | if source: |
|
624 | if source: | |
649 | copies[f] = source |
|
625 | copies[f] = source | |
650 | e.drop_merge_data() |
|
626 | e.drop_merge_data() | |
651 | rust_map.set_dirstate_item(f, e) |
|
627 | rust_map.set_dirstate_item(f, e) | |
652 | return copies |
|
628 | return copies | |
653 |
|
629 | |||
654 | def parents(self): |
|
630 | def parents(self): | |
655 | if not self._parents: |
|
631 | if not self._parents: | |
656 | if self._use_dirstate_v2: |
|
632 | if self._use_dirstate_v2: | |
657 | self._parents = self.docket.parents |
|
633 | self._parents = self.docket.parents | |
658 | else: |
|
634 | else: | |
659 | read_len = self._nodelen * 2 |
|
635 | read_len = self._nodelen * 2 | |
660 | st = self._readdirstatefile(read_len) |
|
636 | st = self._readdirstatefile(read_len) | |
661 | l = len(st) |
|
637 | l = len(st) | |
662 | if l == read_len: |
|
638 | if l == read_len: | |
663 | self._parents = ( |
|
639 | self._parents = ( | |
664 | st[: self._nodelen], |
|
640 | st[: self._nodelen], | |
665 | st[self._nodelen : 2 * self._nodelen], |
|
641 | st[self._nodelen : 2 * self._nodelen], | |
666 | ) |
|
642 | ) | |
667 | elif l == 0: |
|
643 | elif l == 0: | |
668 | self._parents = ( |
|
644 | self._parents = ( | |
669 | self._nodeconstants.nullid, |
|
645 | self._nodeconstants.nullid, | |
670 | self._nodeconstants.nullid, |
|
646 | self._nodeconstants.nullid, | |
671 | ) |
|
647 | ) | |
672 | else: |
|
648 | else: | |
673 | raise error.Abort( |
|
649 | raise error.Abort( | |
674 | _(b'working directory state appears damaged!') |
|
650 | _(b'working directory state appears damaged!') | |
675 | ) |
|
651 | ) | |
676 |
|
652 | |||
677 | return self._parents |
|
653 | return self._parents | |
678 |
|
654 | |||
679 | ### disk interaction |
|
655 | ### disk interaction | |
680 |
|
656 | |||
681 | @propertycache |
|
657 | @propertycache | |
682 | def identity(self): |
|
658 | def identity(self): | |
683 | self._map |
|
659 | self._map | |
684 | return self.identity |
|
660 | return self.identity | |
685 |
|
661 | |||
686 | def write(self, tr, st, now): |
|
662 | def write(self, tr, st, now): | |
687 | if not self._use_dirstate_v2: |
|
663 | if not self._use_dirstate_v2: | |
688 | p1, p2 = self.parents() |
|
664 | p1, p2 = self.parents() | |
689 | packed = self._map.write_v1(p1, p2, now) |
|
665 | packed = self._map.write_v1(p1, p2, now) | |
690 | st.write(packed) |
|
666 | st.write(packed) | |
691 | st.close() |
|
667 | st.close() | |
692 | self._dirtyparents = False |
|
668 | self._dirtyparents = False | |
693 | return |
|
669 | return | |
694 |
|
670 | |||
695 | # We can only append to an existing data file if there is one |
|
671 | # We can only append to an existing data file if there is one | |
696 | can_append = self.docket.uuid is not None |
|
672 | can_append = self.docket.uuid is not None | |
697 | packed, meta, append = self._map.write_v2(now, can_append) |
|
673 | packed, meta, append = self._map.write_v2(now, can_append) | |
698 | if append: |
|
674 | if append: | |
699 | docket = self.docket |
|
675 | docket = self.docket | |
700 | data_filename = docket.data_filename() |
|
676 | data_filename = docket.data_filename() | |
701 | if tr: |
|
677 | if tr: | |
702 | tr.add(data_filename, docket.data_size) |
|
678 | tr.add(data_filename, docket.data_size) | |
703 | with self._opener(data_filename, b'r+b') as fp: |
|
679 | with self._opener(data_filename, b'r+b') as fp: | |
704 | fp.seek(docket.data_size) |
|
680 | fp.seek(docket.data_size) | |
705 | assert fp.tell() == docket.data_size |
|
681 | assert fp.tell() == docket.data_size | |
706 | written = fp.write(packed) |
|
682 | written = fp.write(packed) | |
707 | if written is not None: # py2 may return None |
|
683 | if written is not None: # py2 may return None | |
708 | assert written == len(packed), (written, len(packed)) |
|
684 | assert written == len(packed), (written, len(packed)) | |
709 | docket.data_size += len(packed) |
|
685 | docket.data_size += len(packed) | |
710 | docket.parents = self.parents() |
|
686 | docket.parents = self.parents() | |
711 | docket.tree_metadata = meta |
|
687 | docket.tree_metadata = meta | |
712 | st.write(docket.serialize()) |
|
688 | st.write(docket.serialize()) | |
713 | st.close() |
|
689 | st.close() | |
714 | else: |
|
690 | else: | |
715 | old_docket = self.docket |
|
691 | old_docket = self.docket | |
716 | new_docket = docketmod.DirstateDocket.with_new_uuid( |
|
692 | new_docket = docketmod.DirstateDocket.with_new_uuid( | |
717 | self.parents(), len(packed), meta |
|
693 | self.parents(), len(packed), meta | |
718 | ) |
|
694 | ) | |
719 | data_filename = new_docket.data_filename() |
|
695 | data_filename = new_docket.data_filename() | |
720 | if tr: |
|
696 | if tr: | |
721 | tr.add(data_filename, 0) |
|
697 | tr.add(data_filename, 0) | |
722 | self._opener.write(data_filename, packed) |
|
698 | self._opener.write(data_filename, packed) | |
723 | # Write the new docket after the new data file has been |
|
699 | # Write the new docket after the new data file has been | |
724 | # written. Because `st` was opened with `atomictemp=True`, |
|
700 | # written. Because `st` was opened with `atomictemp=True`, | |
725 | # the actual `.hg/dirstate` file is only affected on close. |
|
701 | # the actual `.hg/dirstate` file is only affected on close. | |
726 | st.write(new_docket.serialize()) |
|
702 | st.write(new_docket.serialize()) | |
727 | st.close() |
|
703 | st.close() | |
728 | # Remove the old data file after the new docket pointing to |
|
704 | # Remove the old data file after the new docket pointing to | |
729 | # the new data file was written. |
|
705 | # the new data file was written. | |
730 | if old_docket.uuid: |
|
706 | if old_docket.uuid: | |
731 | data_filename = old_docket.data_filename() |
|
707 | data_filename = old_docket.data_filename() | |
732 | unlink = lambda _tr=None: self._opener.unlink(data_filename) |
|
708 | unlink = lambda _tr=None: self._opener.unlink(data_filename) | |
733 | if tr: |
|
709 | if tr: | |
734 | category = b"dirstate-v2-clean-" + old_docket.uuid |
|
710 | category = b"dirstate-v2-clean-" + old_docket.uuid | |
735 | tr.addpostclose(category, unlink) |
|
711 | tr.addpostclose(category, unlink) | |
736 | else: |
|
712 | else: | |
737 | unlink() |
|
713 | unlink() | |
738 | self._docket = new_docket |
|
714 | self._docket = new_docket | |
739 | # Reload from the newly-written file |
|
715 | # Reload from the newly-written file | |
740 | util.clearcachedproperty(self, b"_map") |
|
716 | util.clearcachedproperty(self, b"_map") | |
741 | self._dirtyparents = False |
|
717 | self._dirtyparents = False | |
742 |
|
718 | |||
743 | def _opendirstatefile(self): |
|
719 | def _opendirstatefile(self): | |
744 | fp, mode = txnutil.trypending( |
|
720 | fp, mode = txnutil.trypending( | |
745 | self._root, self._opener, self._filename |
|
721 | self._root, self._opener, self._filename | |
746 | ) |
|
722 | ) | |
747 | if self._pendingmode is not None and self._pendingmode != mode: |
|
723 | if self._pendingmode is not None and self._pendingmode != mode: | |
748 | fp.close() |
|
724 | fp.close() | |
749 | raise error.Abort( |
|
725 | raise error.Abort( | |
750 | _(b'working directory state may be changed parallelly') |
|
726 | _(b'working directory state may be changed parallelly') | |
751 | ) |
|
727 | ) | |
752 | self._pendingmode = mode |
|
728 | self._pendingmode = mode | |
753 | return fp |
|
729 | return fp | |
754 |
|
730 | |||
755 | def _readdirstatefile(self, size=-1): |
|
731 | def _readdirstatefile(self, size=-1): | |
756 | try: |
|
732 | try: | |
757 | with self._opendirstatefile() as fp: |
|
733 | with self._opendirstatefile() as fp: | |
758 | return fp.read(size) |
|
734 | return fp.read(size) | |
759 | except IOError as err: |
|
735 | except IOError as err: | |
760 | if err.errno != errno.ENOENT: |
|
736 | if err.errno != errno.ENOENT: | |
761 | raise |
|
737 | raise | |
762 | # File doesn't exist, so the current state is empty |
|
738 | # File doesn't exist, so the current state is empty | |
763 | return b'' |
|
739 | return b'' | |
764 |
|
740 | |||
765 | ### code related to maintaining and accessing "extra" property |
|
741 | ### code related to maintaining and accessing "extra" property | |
766 | # (e.g. "has_dir") |
|
742 | # (e.g. "has_dir") | |
767 |
|
743 | |||
768 | @propertycache |
|
744 | @propertycache | |
769 | def filefoldmap(self): |
|
745 | def filefoldmap(self): | |
770 | """Returns a dictionary mapping normalized case paths to their |
|
746 | """Returns a dictionary mapping normalized case paths to their | |
771 | non-normalized versions. |
|
747 | non-normalized versions. | |
772 | """ |
|
748 | """ | |
773 | return self._map.filefoldmapasdict() |
|
749 | return self._map.filefoldmapasdict() | |
774 |
|
750 | |||
775 | def hastrackeddir(self, d): |
|
751 | def hastrackeddir(self, d): | |
776 | return self._map.hastrackeddir(d) |
|
752 | return self._map.hastrackeddir(d) | |
777 |
|
753 | |||
778 | def hasdir(self, d): |
|
754 | def hasdir(self, d): | |
779 | return self._map.hasdir(d) |
|
755 | return self._map.hasdir(d) | |
780 |
|
756 | |||
781 | @propertycache |
|
757 | @propertycache | |
782 | def dirfoldmap(self): |
|
758 | def dirfoldmap(self): | |
783 | f = {} |
|
759 | f = {} | |
784 | normcase = util.normcase |
|
760 | normcase = util.normcase | |
785 | for name in self._map.tracked_dirs(): |
|
761 | for name in self._map.tracked_dirs(): | |
786 | f[normcase(name)] = name |
|
762 | f[normcase(name)] = name | |
787 | return f |
|
763 | return f | |
788 |
|
764 | |||
789 | ### code related to manipulation of entries and copy-sources |
|
765 | ### code related to manipulation of entries and copy-sources | |
790 |
|
766 | |||
791 | def _refresh_entry(self, f, entry): |
|
767 | def _refresh_entry(self, f, entry): | |
792 | if not entry.any_tracked: |
|
768 | if not entry.any_tracked: | |
793 | self._map.drop_item_and_copy_source(f) |
|
769 | self._map.drop_item_and_copy_source(f) | |
794 | else: |
|
770 | else: | |
795 | self._map.addfile(f, entry) |
|
771 | self._map.addfile(f, entry) | |
796 |
|
772 | |||
797 | def _insert_entry(self, f, entry): |
|
773 | def _insert_entry(self, f, entry): | |
798 | self._map.addfile(f, entry) |
|
774 | self._map.addfile(f, entry) | |
799 |
|
775 | |||
800 | def _drop_entry(self, f): |
|
776 | def _drop_entry(self, f): | |
801 | self._map.drop_item_and_copy_source(f) |
|
777 | self._map.drop_item_and_copy_source(f) | |
802 |
|
778 | |||
803 | def __setitem__(self, key, value): |
|
779 | def __setitem__(self, key, value): | |
804 | assert isinstance(value, DirstateItem) |
|
780 | assert isinstance(value, DirstateItem) | |
805 | self._map.set_dirstate_item(key, value) |
|
781 | self._map.set_dirstate_item(key, value) |
@@ -1,842 +1,784 | |||||
1 | # parsers.py - Python implementation of parsers.c |
|
1 | # parsers.py - Python implementation of parsers.c | |
2 | # |
|
2 | # | |
3 | # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others |
|
3 | # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import struct |
|
10 | import struct | |
11 | import zlib |
|
11 | import zlib | |
12 |
|
12 | |||
13 | from ..node import ( |
|
13 | from ..node import ( | |
14 | nullrev, |
|
14 | nullrev, | |
15 | sha1nodeconstants, |
|
15 | sha1nodeconstants, | |
16 | ) |
|
16 | ) | |
17 | from ..thirdparty import attr |
|
17 | from ..thirdparty import attr | |
18 | from .. import ( |
|
18 | from .. import ( | |
19 | error, |
|
19 | error, | |
20 | pycompat, |
|
20 | pycompat, | |
21 | revlogutils, |
|
21 | revlogutils, | |
22 | util, |
|
22 | util, | |
23 | ) |
|
23 | ) | |
24 |
|
24 | |||
25 | from ..revlogutils import nodemap as nodemaputil |
|
25 | from ..revlogutils import nodemap as nodemaputil | |
26 | from ..revlogutils import constants as revlog_constants |
|
26 | from ..revlogutils import constants as revlog_constants | |
27 |
|
27 | |||
28 | stringio = pycompat.bytesio |
|
28 | stringio = pycompat.bytesio | |
29 |
|
29 | |||
30 |
|
30 | |||
31 | _pack = struct.pack |
|
31 | _pack = struct.pack | |
32 | _unpack = struct.unpack |
|
32 | _unpack = struct.unpack | |
33 | _compress = zlib.compress |
|
33 | _compress = zlib.compress | |
34 | _decompress = zlib.decompress |
|
34 | _decompress = zlib.decompress | |
35 |
|
35 | |||
36 |
|
36 | |||
37 | # a special value used internally for `size` if the file come from the other parent |
|
37 | # a special value used internally for `size` if the file come from the other parent | |
38 | FROM_P2 = -2 |
|
38 | FROM_P2 = -2 | |
39 |
|
39 | |||
40 | # a special value used internally for `size` if the file is modified/merged/added |
|
40 | # a special value used internally for `size` if the file is modified/merged/added | |
41 | NONNORMAL = -1 |
|
41 | NONNORMAL = -1 | |
42 |
|
42 | |||
43 | # a special value used internally for `time` if the time is ambigeous |
|
43 | # a special value used internally for `time` if the time is ambigeous | |
44 | AMBIGUOUS_TIME = -1 |
|
44 | AMBIGUOUS_TIME = -1 | |
45 |
|
45 | |||
46 |
|
46 | |||
47 | @attr.s(slots=True, init=False) |
|
47 | @attr.s(slots=True, init=False) | |
48 | class DirstateItem(object): |
|
48 | class DirstateItem(object): | |
49 | """represent a dirstate entry |
|
49 | """represent a dirstate entry | |
50 |
|
50 | |||
51 | It hold multiple attributes |
|
51 | It hold multiple attributes | |
52 |
|
52 | |||
53 | # about file tracking |
|
53 | # about file tracking | |
54 | - wc_tracked: is the file tracked by the working copy |
|
54 | - wc_tracked: is the file tracked by the working copy | |
55 | - p1_tracked: is the file tracked in working copy first parent |
|
55 | - p1_tracked: is the file tracked in working copy first parent | |
56 | - p2_tracked: is the file tracked in working copy second parent |
|
56 | - p2_info: the file has been involved in some merge operation. Either | |
57 |
|
57 | because it was actually merged, or because the p2 version was | ||
58 | # about what possible merge action related to this file |
|
58 | ahead, or because some renamed moved it there. In either case | |
59 | - clean_p1: merge picked the file content from p1 |
|
59 | `hg status` will want it displayed as modified. | |
60 | - clean_p2: merge picked the file content from p2 |
|
|||
61 | - merged: file gather changes from both side. |
|
|||
62 |
|
60 | |||
63 | # about the file state expected from p1 manifest: |
|
61 | # about the file state expected from p1 manifest: | |
64 | - mode: the file mode in p1 |
|
62 | - mode: the file mode in p1 | |
65 | - size: the file size in p1 |
|
63 | - size: the file size in p1 | |
66 |
|
64 | |||
|
65 | These value can be set to None, which mean we don't have a meaningful value | |||
|
66 | to compare with. Either because we don't really care about them as there | |||
|
67 | `status` is known without having to look at the disk or because we don't | |||
|
68 | know these right now and a full comparison will be needed to find out if | |||
|
69 | the file is clean. | |||
|
70 | ||||
67 | # about the file state on disk last time we saw it: |
|
71 | # about the file state on disk last time we saw it: | |
68 | - mtime: the last known clean mtime for the file. |
|
72 | - mtime: the last known clean mtime for the file. | |
69 |
|
73 | |||
70 | The last three item (mode, size and mtime) can be None if no meaningful (or |
|
74 | This value can be set to None if no cachable state exist. Either because we | |
71 | trusted) value exists. |
|
75 | do not care (see previous section) or because we could not cache something | |
72 |
|
76 | yet. | ||
73 | """ |
|
77 | """ | |
74 |
|
78 | |||
75 | _wc_tracked = attr.ib() |
|
79 | _wc_tracked = attr.ib() | |
76 | _p1_tracked = attr.ib() |
|
80 | _p1_tracked = attr.ib() | |
77 |
_p2_ |
|
81 | _p2_info = attr.ib() | |
78 | # the three item above should probably be combined |
|
|||
79 | # |
|
|||
80 | # However it is unclear if they properly cover some of the most advanced |
|
|||
81 | # merge case. So we should probably wait on this to be settled. |
|
|||
82 | _merged = attr.ib() |
|
|||
83 | _clean_p1 = attr.ib() |
|
|||
84 | _clean_p2 = attr.ib() |
|
|||
85 | _possibly_dirty = attr.ib() |
|
|||
86 | _mode = attr.ib() |
|
82 | _mode = attr.ib() | |
87 | _size = attr.ib() |
|
83 | _size = attr.ib() | |
88 | _mtime = attr.ib() |
|
84 | _mtime = attr.ib() | |
89 |
|
85 | |||
90 | def __init__( |
|
86 | def __init__( | |
91 | self, |
|
87 | self, | |
92 | wc_tracked=False, |
|
88 | wc_tracked=False, | |
93 | p1_tracked=False, |
|
89 | p1_tracked=False, | |
94 |
p2_ |
|
90 | p2_info=False, | |
95 | merged=False, |
|
91 | has_meaningful_data=True, | |
96 | clean_p1=False, |
|
92 | has_meaningful_mtime=True, | |
97 | clean_p2=False, |
|
|||
98 | possibly_dirty=False, |
|
|||
99 | parentfiledata=None, |
|
93 | parentfiledata=None, | |
100 | ): |
|
94 | ): | |
101 | if merged and (clean_p1 or clean_p2): |
|
|||
102 | msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`' |
|
|||
103 | raise error.ProgrammingError(msg) |
|
|||
104 |
|
||||
105 | assert not (merged and not p1_tracked) |
|
|||
106 | self._wc_tracked = wc_tracked |
|
95 | self._wc_tracked = wc_tracked | |
107 | self._p1_tracked = p1_tracked |
|
96 | self._p1_tracked = p1_tracked | |
108 |
self._p2_ |
|
97 | self._p2_info = p2_info | |
109 | self._merged = merged |
|
98 | ||
110 | self._clean_p1 = clean_p1 |
|
|||
111 | self._clean_p2 = clean_p2 |
|
|||
112 | self._possibly_dirty = possibly_dirty |
|
|||
113 | if parentfiledata is None: |
|
|||
114 |
|
|
99 | self._mode = None | |
115 |
|
|
100 | self._size = None | |
116 |
|
|
101 | self._mtime = None | |
117 | else: |
|
102 | if parentfiledata is None: | |
|
103 | has_meaningful_mtime = False | |||
|
104 | has_meaningful_data = False | |||
|
105 | if has_meaningful_data: | |||
118 | self._mode = parentfiledata[0] |
|
106 | self._mode = parentfiledata[0] | |
119 | self._size = parentfiledata[1] |
|
107 | self._size = parentfiledata[1] | |
|
108 | if has_meaningful_mtime: | |||
120 | self._mtime = parentfiledata[2] |
|
109 | self._mtime = parentfiledata[2] | |
121 |
|
110 | |||
122 | @classmethod |
|
111 | @classmethod | |
123 | def new_added(cls): |
|
112 | def new_added(cls): | |
124 | """constructor to help legacy API to build a new "added" item |
|
113 | """constructor to help legacy API to build a new "added" item | |
125 |
|
114 | |||
126 | Should eventually be removed |
|
115 | Should eventually be removed | |
127 | """ |
|
116 | """ | |
128 | instance = cls() |
|
117 | return cls(wc_tracked=True) | |
129 | instance._wc_tracked = True |
|
|||
130 | instance._p1_tracked = False |
|
|||
131 | instance._p2_tracked = False |
|
|||
132 | return instance |
|
|||
133 |
|
118 | |||
134 | @classmethod |
|
119 | @classmethod | |
135 | def new_merged(cls): |
|
120 | def new_merged(cls): | |
136 | """constructor to help legacy API to build a new "merged" item |
|
121 | """constructor to help legacy API to build a new "merged" item | |
137 |
|
122 | |||
138 | Should eventually be removed |
|
123 | Should eventually be removed | |
139 | """ |
|
124 | """ | |
140 | instance = cls() |
|
125 | return cls(wc_tracked=True, p1_tracked=True, p2_info=True) | |
141 | instance._wc_tracked = True |
|
|||
142 | instance._p1_tracked = True # might not be True because of rename ? |
|
|||
143 | instance._p2_tracked = True # might not be True because of rename ? |
|
|||
144 | instance._merged = True |
|
|||
145 | return instance |
|
|||
146 |
|
126 | |||
147 | @classmethod |
|
127 | @classmethod | |
148 | def new_from_p2(cls): |
|
128 | def new_from_p2(cls): | |
149 | """constructor to help legacy API to build a new "from_p2" item |
|
129 | """constructor to help legacy API to build a new "from_p2" item | |
150 |
|
130 | |||
151 | Should eventually be removed |
|
131 | Should eventually be removed | |
152 | """ |
|
132 | """ | |
153 | instance = cls() |
|
133 | return cls(wc_tracked=True, p2_info=True) | |
154 | instance._wc_tracked = True |
|
|||
155 | instance._p1_tracked = False # might actually be True |
|
|||
156 | instance._p2_tracked = True |
|
|||
157 | instance._clean_p2 = True |
|
|||
158 | return instance |
|
|||
159 |
|
134 | |||
160 | @classmethod |
|
135 | @classmethod | |
161 | def new_possibly_dirty(cls): |
|
136 | def new_possibly_dirty(cls): | |
162 | """constructor to help legacy API to build a new "possibly_dirty" item |
|
137 | """constructor to help legacy API to build a new "possibly_dirty" item | |
163 |
|
138 | |||
164 | Should eventually be removed |
|
139 | Should eventually be removed | |
165 | """ |
|
140 | """ | |
166 | instance = cls() |
|
141 | return cls(wc_tracked=True, p1_tracked=True) | |
167 | instance._wc_tracked = True |
|
|||
168 | instance._p1_tracked = True |
|
|||
169 | instance._possibly_dirty = True |
|
|||
170 | return instance |
|
|||
171 |
|
142 | |||
172 | @classmethod |
|
143 | @classmethod | |
173 | def new_normal(cls, mode, size, mtime): |
|
144 | def new_normal(cls, mode, size, mtime): | |
174 | """constructor to help legacy API to build a new "normal" item |
|
145 | """constructor to help legacy API to build a new "normal" item | |
175 |
|
146 | |||
176 | Should eventually be removed |
|
147 | Should eventually be removed | |
177 | """ |
|
148 | """ | |
178 | assert size != FROM_P2 |
|
149 | assert size != FROM_P2 | |
179 | assert size != NONNORMAL |
|
150 | assert size != NONNORMAL | |
180 |
|
|
151 | return cls( | |
181 |
|
|
152 | wc_tracked=True, | |
182 |
|
|
153 | p1_tracked=True, | |
183 | instance._mode = mode |
|
154 | parentfiledata=(mode, size, mtime), | |
184 | instance._size = size |
|
155 | ) | |
185 | instance._mtime = mtime |
|
|||
186 | return instance |
|
|||
187 |
|
156 | |||
188 | @classmethod |
|
157 | @classmethod | |
189 | def from_v1_data(cls, state, mode, size, mtime): |
|
158 | def from_v1_data(cls, state, mode, size, mtime): | |
190 | """Build a new DirstateItem object from V1 data |
|
159 | """Build a new DirstateItem object from V1 data | |
191 |
|
160 | |||
192 | Since the dirstate-v1 format is frozen, the signature of this function |
|
161 | Since the dirstate-v1 format is frozen, the signature of this function | |
193 | is not expected to change, unlike the __init__ one. |
|
162 | is not expected to change, unlike the __init__ one. | |
194 | """ |
|
163 | """ | |
195 | if state == b'm': |
|
164 | if state == b'm': | |
196 | return cls.new_merged() |
|
165 | return cls.new_merged() | |
197 | elif state == b'a': |
|
166 | elif state == b'a': | |
198 | return cls.new_added() |
|
167 | return cls.new_added() | |
199 | elif state == b'r': |
|
168 | elif state == b'r': | |
200 | instance = cls() |
|
|||
201 | instance._wc_tracked = False |
|
|||
202 | if size == NONNORMAL: |
|
169 | if size == NONNORMAL: | |
203 |
|
|
170 | p1_tracked = True | |
204 |
|
|
171 | p2_info = True | |
205 | True # might not be True because of rename ? |
|
|||
206 | ) |
|
|||
207 | instance._p2_tracked = ( |
|
|||
208 | True # might not be True because of rename ? |
|
|||
209 | ) |
|
|||
210 | elif size == FROM_P2: |
|
172 | elif size == FROM_P2: | |
211 |
|
|
173 | p1_tracked = False | |
212 |
|
|
174 | p2_info = True | |
213 | False # We actually don't know (file history) |
|
|||
214 | ) |
|
|||
215 | instance._p2_tracked = True |
|
|||
216 | else: |
|
175 | else: | |
217 |
|
|
176 | p1_tracked = True | |
218 | return instance |
|
177 | p2_info = False | |
|
178 | return cls(p1_tracked=p1_tracked, p2_info=p2_info) | |||
219 | elif state == b'n': |
|
179 | elif state == b'n': | |
220 | if size == FROM_P2: |
|
180 | if size == FROM_P2: | |
221 | return cls.new_from_p2() |
|
181 | return cls.new_from_p2() | |
222 | elif size == NONNORMAL: |
|
182 | elif size == NONNORMAL: | |
223 | return cls.new_possibly_dirty() |
|
183 | return cls.new_possibly_dirty() | |
224 | elif mtime == AMBIGUOUS_TIME: |
|
184 | elif mtime == AMBIGUOUS_TIME: | |
225 | instance = cls.new_normal(mode, size, 42) |
|
185 | instance = cls.new_normal(mode, size, 42) | |
226 | instance._mtime = None |
|
186 | instance._mtime = None | |
227 | instance._possibly_dirty = True |
|
|||
228 | return instance |
|
187 | return instance | |
229 | else: |
|
188 | else: | |
230 | return cls.new_normal(mode, size, mtime) |
|
189 | return cls.new_normal(mode, size, mtime) | |
231 | else: |
|
190 | else: | |
232 | raise RuntimeError(b'unknown state: %s' % state) |
|
191 | raise RuntimeError(b'unknown state: %s' % state) | |
233 |
|
192 | |||
234 | def set_possibly_dirty(self): |
|
193 | def set_possibly_dirty(self): | |
235 | """Mark a file as "possibly dirty" |
|
194 | """Mark a file as "possibly dirty" | |
236 |
|
195 | |||
237 | This means the next status call will have to actually check its content |
|
196 | This means the next status call will have to actually check its content | |
238 | to make sure it is correct. |
|
197 | to make sure it is correct. | |
239 | """ |
|
198 | """ | |
240 |
self._ |
|
199 | self._mtime = None | |
241 |
|
200 | |||
242 | def set_clean(self, mode, size, mtime): |
|
201 | def set_clean(self, mode, size, mtime): | |
243 | """mark a file as "clean" cancelling potential "possibly dirty call" |
|
202 | """mark a file as "clean" cancelling potential "possibly dirty call" | |
244 |
|
203 | |||
245 | Note: this function is a descendant of `dirstate.normal` and is |
|
204 | Note: this function is a descendant of `dirstate.normal` and is | |
246 | currently expected to be call on "normal" entry only. There are not |
|
205 | currently expected to be call on "normal" entry only. There are not | |
247 | reason for this to not change in the future as long as the ccode is |
|
206 | reason for this to not change in the future as long as the ccode is | |
248 | updated to preserve the proper state of the non-normal files. |
|
207 | updated to preserve the proper state of the non-normal files. | |
249 | """ |
|
208 | """ | |
250 | self._wc_tracked = True |
|
209 | self._wc_tracked = True | |
251 | self._p1_tracked = True |
|
210 | self._p1_tracked = True | |
252 | self._p2_tracked = False # this might be wrong |
|
|||
253 | self._merged = False |
|
|||
254 | self._clean_p2 = False |
|
|||
255 | self._possibly_dirty = False |
|
|||
256 | self._mode = mode |
|
211 | self._mode = mode | |
257 | self._size = size |
|
212 | self._size = size | |
258 | self._mtime = mtime |
|
213 | self._mtime = mtime | |
259 |
|
214 | |||
260 | def set_tracked(self): |
|
215 | def set_tracked(self): | |
261 | """mark a file as tracked in the working copy |
|
216 | """mark a file as tracked in the working copy | |
262 |
|
217 | |||
263 | This will ultimately be called by command like `hg add`. |
|
218 | This will ultimately be called by command like `hg add`. | |
264 | """ |
|
219 | """ | |
265 | self._wc_tracked = True |
|
220 | self._wc_tracked = True | |
266 |
# `set_tracked` is replacing various `normallookup` call. So we |
|
221 | # `set_tracked` is replacing various `normallookup` call. So we mark | |
267 | # "possibly dirty" to stay on the safe side. |
|
222 | # the files as needing lookup | |
268 | # |
|
223 | # | |
269 | # Consider dropping this in the future in favor of something less broad. |
|
224 | # Consider dropping this in the future in favor of something less broad. | |
270 |
self._ |
|
225 | self._mtime = None | |
271 |
|
226 | |||
272 | def set_untracked(self): |
|
227 | def set_untracked(self): | |
273 | """mark a file as untracked in the working copy |
|
228 | """mark a file as untracked in the working copy | |
274 |
|
229 | |||
275 | This will ultimately be called by command like `hg remove`. |
|
230 | This will ultimately be called by command like `hg remove`. | |
276 | """ |
|
231 | """ | |
277 | self._wc_tracked = False |
|
232 | self._wc_tracked = False | |
278 | self._mode = None |
|
233 | self._mode = None | |
279 | self._size = None |
|
234 | self._size = None | |
280 | self._mtime = None |
|
235 | self._mtime = None | |
281 |
|
236 | |||
282 | def drop_merge_data(self): |
|
237 | def drop_merge_data(self): | |
283 | """remove all "merge-only" from a DirstateItem |
|
238 | """remove all "merge-only" from a DirstateItem | |
284 |
|
239 | |||
285 | This is to be call by the dirstatemap code when the second parent is dropped |
|
240 | This is to be call by the dirstatemap code when the second parent is dropped | |
286 | """ |
|
241 | """ | |
287 | if not (self.merged or self.from_p2): |
|
242 | if self._p2_info: | |
288 | return |
|
243 | self._p2_info = False | |
289 | self._p1_tracked = self.merged # why is this not already properly set ? |
|
|||
290 |
|
||||
291 | self._merged = False |
|
|||
292 | self._clean_p1 = False |
|
|||
293 | self._clean_p2 = False |
|
|||
294 | self._p2_tracked = False |
|
|||
295 | self._possibly_dirty = True |
|
|||
296 | self._mode = None |
|
244 | self._mode = None | |
297 | self._size = None |
|
245 | self._size = None | |
298 | self._mtime = None |
|
246 | self._mtime = None | |
299 |
|
247 | |||
300 | @property |
|
248 | @property | |
301 | def mode(self): |
|
249 | def mode(self): | |
302 | return self.v1_mode() |
|
250 | return self.v1_mode() | |
303 |
|
251 | |||
304 | @property |
|
252 | @property | |
305 | def size(self): |
|
253 | def size(self): | |
306 | return self.v1_size() |
|
254 | return self.v1_size() | |
307 |
|
255 | |||
308 | @property |
|
256 | @property | |
309 | def mtime(self): |
|
257 | def mtime(self): | |
310 | return self.v1_mtime() |
|
258 | return self.v1_mtime() | |
311 |
|
259 | |||
312 | @property |
|
260 | @property | |
313 | def state(self): |
|
261 | def state(self): | |
314 | """ |
|
262 | """ | |
315 | States are: |
|
263 | States are: | |
316 | n normal |
|
264 | n normal | |
317 | m needs merging |
|
265 | m needs merging | |
318 | r marked for removal |
|
266 | r marked for removal | |
319 | a marked for addition |
|
267 | a marked for addition | |
320 |
|
268 | |||
321 | XXX This "state" is a bit obscure and mostly a direct expression of the |
|
269 | XXX This "state" is a bit obscure and mostly a direct expression of the | |
322 | dirstatev1 format. It would make sense to ultimately deprecate it in |
|
270 | dirstatev1 format. It would make sense to ultimately deprecate it in | |
323 | favor of the more "semantic" attributes. |
|
271 | favor of the more "semantic" attributes. | |
324 | """ |
|
272 | """ | |
325 | if not self.any_tracked: |
|
273 | if not self.any_tracked: | |
326 | return b'?' |
|
274 | return b'?' | |
327 | return self.v1_state() |
|
275 | return self.v1_state() | |
328 |
|
276 | |||
329 | @property |
|
277 | @property | |
330 | def tracked(self): |
|
278 | def tracked(self): | |
331 | """True is the file is tracked in the working copy""" |
|
279 | """True is the file is tracked in the working copy""" | |
332 | return self._wc_tracked |
|
280 | return self._wc_tracked | |
333 |
|
281 | |||
334 | @property |
|
282 | @property | |
335 | def any_tracked(self): |
|
283 | def any_tracked(self): | |
336 | """True is the file is tracked anywhere (wc or parents)""" |
|
284 | """True is the file is tracked anywhere (wc or parents)""" | |
337 |
return self._wc_tracked or self._p1_tracked or self._p2_ |
|
285 | return self._wc_tracked or self._p1_tracked or self._p2_info | |
338 |
|
286 | |||
339 | @property |
|
287 | @property | |
340 | def added(self): |
|
288 | def added(self): | |
341 | """True if the file has been added""" |
|
289 | """True if the file has been added""" | |
342 |
return self._wc_tracked and not (self._p1_tracked or self._p2_ |
|
290 | return self._wc_tracked and not (self._p1_tracked or self._p2_info) | |
343 |
|
291 | |||
344 | @property |
|
292 | @property | |
345 | def maybe_clean(self): |
|
293 | def maybe_clean(self): | |
346 | """True if the file has a chance to be in the "clean" state""" |
|
294 | """True if the file has a chance to be in the "clean" state""" | |
347 | if not self._wc_tracked: |
|
295 | if not self._wc_tracked: | |
348 | return False |
|
296 | return False | |
349 |
elif self. |
|
297 | elif not self._p1_tracked: | |
350 | return False |
|
298 | return False | |
351 |
elif self._ |
|
299 | elif self._p2_info: | |
352 | return False |
|
|||
353 | elif self._clean_p2: |
|
|||
354 | return False |
|
300 | return False | |
355 | return True |
|
301 | return True | |
356 |
|
302 | |||
357 | @property |
|
303 | @property | |
358 | def merged(self): |
|
304 | def merged(self): | |
359 | """True if the file has been merged |
|
305 | """True if the file has been merged | |
360 |
|
306 | |||
361 | Should only be set if a merge is in progress in the dirstate |
|
307 | Should only be set if a merge is in progress in the dirstate | |
362 | """ |
|
308 | """ | |
363 |
return self._wc_tracked and self._ |
|
309 | return self._wc_tracked and self._p1_tracked and self._p2_info | |
364 |
|
310 | |||
365 | @property |
|
311 | @property | |
366 | def from_p2(self): |
|
312 | def from_p2(self): | |
367 | """True if the file have been fetched from p2 during the current merge |
|
313 | """True if the file have been fetched from p2 during the current merge | |
368 |
|
314 | |||
369 | This is only True is the file is currently tracked. |
|
315 | This is only True is the file is currently tracked. | |
370 |
|
316 | |||
371 | Should only be set if a merge is in progress in the dirstate |
|
317 | Should only be set if a merge is in progress in the dirstate | |
372 | """ |
|
318 | """ | |
373 | if not self._wc_tracked: |
|
319 | return self._wc_tracked and (not self._p1_tracked) and self._p2_info | |
374 | return False |
|
|||
375 | return self._clean_p2 |
|
|||
376 |
|
320 | |||
377 | @property |
|
321 | @property | |
378 | def removed(self): |
|
322 | def removed(self): | |
379 | """True if the file has been removed""" |
|
323 | """True if the file has been removed""" | |
380 |
return not self._wc_tracked and (self._p1_tracked or self._p2_ |
|
324 | return not self._wc_tracked and (self._p1_tracked or self._p2_info) | |
381 |
|
325 | |||
382 | def v1_state(self): |
|
326 | def v1_state(self): | |
383 | """return a "state" suitable for v1 serialization""" |
|
327 | """return a "state" suitable for v1 serialization""" | |
384 | if not (self._p1_tracked or self._p2_tracked or self._wc_tracked): |
|
328 | if not self.any_tracked: | |
385 | # the object has no state to record, this is -currently- |
|
329 | # the object has no state to record, this is -currently- | |
386 | # unsupported |
|
330 | # unsupported | |
387 | raise RuntimeError('untracked item') |
|
331 | raise RuntimeError('untracked item') | |
388 | elif self.removed: |
|
332 | elif self.removed: | |
389 | return b'r' |
|
333 | return b'r' | |
390 | elif self.merged: |
|
334 | elif self.merged: | |
391 | return b'm' |
|
335 | return b'm' | |
392 | elif self.added: |
|
336 | elif self.added: | |
393 | return b'a' |
|
337 | return b'a' | |
394 | else: |
|
338 | else: | |
395 | return b'n' |
|
339 | return b'n' | |
396 |
|
340 | |||
397 | def v1_mode(self): |
|
341 | def v1_mode(self): | |
398 | """return a "mode" suitable for v1 serialization""" |
|
342 | """return a "mode" suitable for v1 serialization""" | |
399 | return self._mode if self._mode is not None else 0 |
|
343 | return self._mode if self._mode is not None else 0 | |
400 |
|
344 | |||
401 | def v1_size(self): |
|
345 | def v1_size(self): | |
402 | """return a "size" suitable for v1 serialization""" |
|
346 | """return a "size" suitable for v1 serialization""" | |
403 | if not self.any_tracked: |
|
347 | if not self.any_tracked: | |
404 | # the object has no state to record, this is -currently- |
|
348 | # the object has no state to record, this is -currently- | |
405 | # unsupported |
|
349 | # unsupported | |
406 | raise RuntimeError('untracked item') |
|
350 | raise RuntimeError('untracked item') | |
407 |
elif self.removed and self._ |
|
351 | elif self.removed and self._p1_tracked and self._p2_info: | |
408 | return NONNORMAL |
|
352 | return NONNORMAL | |
409 |
elif self.removed and self. |
|
353 | elif self.removed and self._p2_info: | |
410 | return FROM_P2 |
|
354 | return FROM_P2 | |
411 | elif self.removed: |
|
355 | elif self.removed: | |
412 | return 0 |
|
356 | return 0 | |
413 | elif self.merged: |
|
357 | elif self.merged: | |
414 | return FROM_P2 |
|
358 | return FROM_P2 | |
415 | elif self.added: |
|
359 | elif self.added: | |
416 | return NONNORMAL |
|
360 | return NONNORMAL | |
417 | elif self.from_p2: |
|
361 | elif self.from_p2: | |
418 | return FROM_P2 |
|
362 | return FROM_P2 | |
419 |
elif self._ |
|
363 | elif self._size is None: | |
420 | return self._size if self._size is not None else NONNORMAL |
|
364 | return NONNORMAL | |
421 | else: |
|
365 | else: | |
422 | return self._size |
|
366 | return self._size | |
423 |
|
367 | |||
424 | def v1_mtime(self): |
|
368 | def v1_mtime(self): | |
425 | """return a "mtime" suitable for v1 serialization""" |
|
369 | """return a "mtime" suitable for v1 serialization""" | |
426 | if not self.any_tracked: |
|
370 | if not self.any_tracked: | |
427 | # the object has no state to record, this is -currently- |
|
371 | # the object has no state to record, this is -currently- | |
428 | # unsupported |
|
372 | # unsupported | |
429 | raise RuntimeError('untracked item') |
|
373 | raise RuntimeError('untracked item') | |
430 | elif self.removed: |
|
374 | elif self.removed: | |
431 | return 0 |
|
375 | return 0 | |
432 |
elif self._ |
|
376 | elif self._mtime is None: | |
433 | return AMBIGUOUS_TIME |
|
|||
434 | elif self.merged: |
|
|||
435 | return AMBIGUOUS_TIME |
|
377 | return AMBIGUOUS_TIME | |
436 |
elif self. |
|
378 | elif self._p2_info: | |
437 | return AMBIGUOUS_TIME |
|
379 | return AMBIGUOUS_TIME | |
438 |
elif self. |
|
380 | elif not self._p1_tracked: | |
439 | return AMBIGUOUS_TIME |
|
381 | return AMBIGUOUS_TIME | |
440 | else: |
|
382 | else: | |
441 |
return self._mtime |
|
383 | return self._mtime | |
442 |
|
384 | |||
443 | def need_delay(self, now): |
|
385 | def need_delay(self, now): | |
444 | """True if the stored mtime would be ambiguous with the current time""" |
|
386 | """True if the stored mtime would be ambiguous with the current time""" | |
445 | return self.v1_state() == b'n' and self.v1_mtime() == now |
|
387 | return self.v1_state() == b'n' and self.v1_mtime() == now | |
446 |
|
388 | |||
447 |
|
389 | |||
448 | def gettype(q): |
|
390 | def gettype(q): | |
449 | return int(q & 0xFFFF) |
|
391 | return int(q & 0xFFFF) | |
450 |
|
392 | |||
451 |
|
393 | |||
452 | class BaseIndexObject(object): |
|
394 | class BaseIndexObject(object): | |
453 | # Can I be passed to an algorithme implemented in Rust ? |
|
395 | # Can I be passed to an algorithme implemented in Rust ? | |
454 | rust_ext_compat = 0 |
|
396 | rust_ext_compat = 0 | |
455 | # Format of an index entry according to Python's `struct` language |
|
397 | # Format of an index entry according to Python's `struct` language | |
456 | index_format = revlog_constants.INDEX_ENTRY_V1 |
|
398 | index_format = revlog_constants.INDEX_ENTRY_V1 | |
457 | # Size of a C unsigned long long int, platform independent |
|
399 | # Size of a C unsigned long long int, platform independent | |
458 | big_int_size = struct.calcsize(b'>Q') |
|
400 | big_int_size = struct.calcsize(b'>Q') | |
459 | # Size of a C long int, platform independent |
|
401 | # Size of a C long int, platform independent | |
460 | int_size = struct.calcsize(b'>i') |
|
402 | int_size = struct.calcsize(b'>i') | |
461 | # An empty index entry, used as a default value to be overridden, or nullrev |
|
403 | # An empty index entry, used as a default value to be overridden, or nullrev | |
462 | null_item = ( |
|
404 | null_item = ( | |
463 | 0, |
|
405 | 0, | |
464 | 0, |
|
406 | 0, | |
465 | 0, |
|
407 | 0, | |
466 | -1, |
|
408 | -1, | |
467 | -1, |
|
409 | -1, | |
468 | -1, |
|
410 | -1, | |
469 | -1, |
|
411 | -1, | |
470 | sha1nodeconstants.nullid, |
|
412 | sha1nodeconstants.nullid, | |
471 | 0, |
|
413 | 0, | |
472 | 0, |
|
414 | 0, | |
473 | revlog_constants.COMP_MODE_INLINE, |
|
415 | revlog_constants.COMP_MODE_INLINE, | |
474 | revlog_constants.COMP_MODE_INLINE, |
|
416 | revlog_constants.COMP_MODE_INLINE, | |
475 | ) |
|
417 | ) | |
476 |
|
418 | |||
477 | @util.propertycache |
|
419 | @util.propertycache | |
478 | def entry_size(self): |
|
420 | def entry_size(self): | |
479 | return self.index_format.size |
|
421 | return self.index_format.size | |
480 |
|
422 | |||
481 | @property |
|
423 | @property | |
482 | def nodemap(self): |
|
424 | def nodemap(self): | |
483 | msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]" |
|
425 | msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]" | |
484 | util.nouideprecwarn(msg, b'5.3', stacklevel=2) |
|
426 | util.nouideprecwarn(msg, b'5.3', stacklevel=2) | |
485 | return self._nodemap |
|
427 | return self._nodemap | |
486 |
|
428 | |||
487 | @util.propertycache |
|
429 | @util.propertycache | |
488 | def _nodemap(self): |
|
430 | def _nodemap(self): | |
489 | nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev}) |
|
431 | nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev}) | |
490 | for r in range(0, len(self)): |
|
432 | for r in range(0, len(self)): | |
491 | n = self[r][7] |
|
433 | n = self[r][7] | |
492 | nodemap[n] = r |
|
434 | nodemap[n] = r | |
493 | return nodemap |
|
435 | return nodemap | |
494 |
|
436 | |||
495 | def has_node(self, node): |
|
437 | def has_node(self, node): | |
496 | """return True if the node exist in the index""" |
|
438 | """return True if the node exist in the index""" | |
497 | return node in self._nodemap |
|
439 | return node in self._nodemap | |
498 |
|
440 | |||
499 | def rev(self, node): |
|
441 | def rev(self, node): | |
500 | """return a revision for a node |
|
442 | """return a revision for a node | |
501 |
|
443 | |||
502 | If the node is unknown, raise a RevlogError""" |
|
444 | If the node is unknown, raise a RevlogError""" | |
503 | return self._nodemap[node] |
|
445 | return self._nodemap[node] | |
504 |
|
446 | |||
505 | def get_rev(self, node): |
|
447 | def get_rev(self, node): | |
506 | """return a revision for a node |
|
448 | """return a revision for a node | |
507 |
|
449 | |||
508 | If the node is unknown, return None""" |
|
450 | If the node is unknown, return None""" | |
509 | return self._nodemap.get(node) |
|
451 | return self._nodemap.get(node) | |
510 |
|
452 | |||
511 | def _stripnodes(self, start): |
|
453 | def _stripnodes(self, start): | |
512 | if '_nodemap' in vars(self): |
|
454 | if '_nodemap' in vars(self): | |
513 | for r in range(start, len(self)): |
|
455 | for r in range(start, len(self)): | |
514 | n = self[r][7] |
|
456 | n = self[r][7] | |
515 | del self._nodemap[n] |
|
457 | del self._nodemap[n] | |
516 |
|
458 | |||
517 | def clearcaches(self): |
|
459 | def clearcaches(self): | |
518 | self.__dict__.pop('_nodemap', None) |
|
460 | self.__dict__.pop('_nodemap', None) | |
519 |
|
461 | |||
520 | def __len__(self): |
|
462 | def __len__(self): | |
521 | return self._lgt + len(self._extra) |
|
463 | return self._lgt + len(self._extra) | |
522 |
|
464 | |||
523 | def append(self, tup): |
|
465 | def append(self, tup): | |
524 | if '_nodemap' in vars(self): |
|
466 | if '_nodemap' in vars(self): | |
525 | self._nodemap[tup[7]] = len(self) |
|
467 | self._nodemap[tup[7]] = len(self) | |
526 | data = self._pack_entry(len(self), tup) |
|
468 | data = self._pack_entry(len(self), tup) | |
527 | self._extra.append(data) |
|
469 | self._extra.append(data) | |
528 |
|
470 | |||
529 | def _pack_entry(self, rev, entry): |
|
471 | def _pack_entry(self, rev, entry): | |
530 | assert entry[8] == 0 |
|
472 | assert entry[8] == 0 | |
531 | assert entry[9] == 0 |
|
473 | assert entry[9] == 0 | |
532 | return self.index_format.pack(*entry[:8]) |
|
474 | return self.index_format.pack(*entry[:8]) | |
533 |
|
475 | |||
534 | def _check_index(self, i): |
|
476 | def _check_index(self, i): | |
535 | if not isinstance(i, int): |
|
477 | if not isinstance(i, int): | |
536 | raise TypeError(b"expecting int indexes") |
|
478 | raise TypeError(b"expecting int indexes") | |
537 | if i < 0 or i >= len(self): |
|
479 | if i < 0 or i >= len(self): | |
538 | raise IndexError |
|
480 | raise IndexError | |
539 |
|
481 | |||
540 | def __getitem__(self, i): |
|
482 | def __getitem__(self, i): | |
541 | if i == -1: |
|
483 | if i == -1: | |
542 | return self.null_item |
|
484 | return self.null_item | |
543 | self._check_index(i) |
|
485 | self._check_index(i) | |
544 | if i >= self._lgt: |
|
486 | if i >= self._lgt: | |
545 | data = self._extra[i - self._lgt] |
|
487 | data = self._extra[i - self._lgt] | |
546 | else: |
|
488 | else: | |
547 | index = self._calculate_index(i) |
|
489 | index = self._calculate_index(i) | |
548 | data = self._data[index : index + self.entry_size] |
|
490 | data = self._data[index : index + self.entry_size] | |
549 | r = self._unpack_entry(i, data) |
|
491 | r = self._unpack_entry(i, data) | |
550 | if self._lgt and i == 0: |
|
492 | if self._lgt and i == 0: | |
551 | offset = revlogutils.offset_type(0, gettype(r[0])) |
|
493 | offset = revlogutils.offset_type(0, gettype(r[0])) | |
552 | r = (offset,) + r[1:] |
|
494 | r = (offset,) + r[1:] | |
553 | return r |
|
495 | return r | |
554 |
|
496 | |||
555 | def _unpack_entry(self, rev, data): |
|
497 | def _unpack_entry(self, rev, data): | |
556 | r = self.index_format.unpack(data) |
|
498 | r = self.index_format.unpack(data) | |
557 | r = r + ( |
|
499 | r = r + ( | |
558 | 0, |
|
500 | 0, | |
559 | 0, |
|
501 | 0, | |
560 | revlog_constants.COMP_MODE_INLINE, |
|
502 | revlog_constants.COMP_MODE_INLINE, | |
561 | revlog_constants.COMP_MODE_INLINE, |
|
503 | revlog_constants.COMP_MODE_INLINE, | |
562 | ) |
|
504 | ) | |
563 | return r |
|
505 | return r | |
564 |
|
506 | |||
565 | def pack_header(self, header): |
|
507 | def pack_header(self, header): | |
566 | """pack header information as binary""" |
|
508 | """pack header information as binary""" | |
567 | v_fmt = revlog_constants.INDEX_HEADER |
|
509 | v_fmt = revlog_constants.INDEX_HEADER | |
568 | return v_fmt.pack(header) |
|
510 | return v_fmt.pack(header) | |
569 |
|
511 | |||
570 | def entry_binary(self, rev): |
|
512 | def entry_binary(self, rev): | |
571 | """return the raw binary string representing a revision""" |
|
513 | """return the raw binary string representing a revision""" | |
572 | entry = self[rev] |
|
514 | entry = self[rev] | |
573 | p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8]) |
|
515 | p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8]) | |
574 | if rev == 0: |
|
516 | if rev == 0: | |
575 | p = p[revlog_constants.INDEX_HEADER.size :] |
|
517 | p = p[revlog_constants.INDEX_HEADER.size :] | |
576 | return p |
|
518 | return p | |
577 |
|
519 | |||
578 |
|
520 | |||
579 | class IndexObject(BaseIndexObject): |
|
521 | class IndexObject(BaseIndexObject): | |
580 | def __init__(self, data): |
|
522 | def __init__(self, data): | |
581 | assert len(data) % self.entry_size == 0, ( |
|
523 | assert len(data) % self.entry_size == 0, ( | |
582 | len(data), |
|
524 | len(data), | |
583 | self.entry_size, |
|
525 | self.entry_size, | |
584 | len(data) % self.entry_size, |
|
526 | len(data) % self.entry_size, | |
585 | ) |
|
527 | ) | |
586 | self._data = data |
|
528 | self._data = data | |
587 | self._lgt = len(data) // self.entry_size |
|
529 | self._lgt = len(data) // self.entry_size | |
588 | self._extra = [] |
|
530 | self._extra = [] | |
589 |
|
531 | |||
590 | def _calculate_index(self, i): |
|
532 | def _calculate_index(self, i): | |
591 | return i * self.entry_size |
|
533 | return i * self.entry_size | |
592 |
|
534 | |||
593 | def __delitem__(self, i): |
|
535 | def __delitem__(self, i): | |
594 | if not isinstance(i, slice) or not i.stop == -1 or i.step is not None: |
|
536 | if not isinstance(i, slice) or not i.stop == -1 or i.step is not None: | |
595 | raise ValueError(b"deleting slices only supports a:-1 with step 1") |
|
537 | raise ValueError(b"deleting slices only supports a:-1 with step 1") | |
596 | i = i.start |
|
538 | i = i.start | |
597 | self._check_index(i) |
|
539 | self._check_index(i) | |
598 | self._stripnodes(i) |
|
540 | self._stripnodes(i) | |
599 | if i < self._lgt: |
|
541 | if i < self._lgt: | |
600 | self._data = self._data[: i * self.entry_size] |
|
542 | self._data = self._data[: i * self.entry_size] | |
601 | self._lgt = i |
|
543 | self._lgt = i | |
602 | self._extra = [] |
|
544 | self._extra = [] | |
603 | else: |
|
545 | else: | |
604 | self._extra = self._extra[: i - self._lgt] |
|
546 | self._extra = self._extra[: i - self._lgt] | |
605 |
|
547 | |||
606 |
|
548 | |||
607 | class PersistentNodeMapIndexObject(IndexObject): |
|
549 | class PersistentNodeMapIndexObject(IndexObject): | |
608 | """a Debug oriented class to test persistent nodemap |
|
550 | """a Debug oriented class to test persistent nodemap | |
609 |
|
551 | |||
610 | We need a simple python object to test API and higher level behavior. See |
|
552 | We need a simple python object to test API and higher level behavior. See | |
611 | the Rust implementation for more serious usage. This should be used only |
|
553 | the Rust implementation for more serious usage. This should be used only | |
612 | through the dedicated `devel.persistent-nodemap` config. |
|
554 | through the dedicated `devel.persistent-nodemap` config. | |
613 | """ |
|
555 | """ | |
614 |
|
556 | |||
615 | def nodemap_data_all(self): |
|
557 | def nodemap_data_all(self): | |
616 | """Return bytes containing a full serialization of a nodemap |
|
558 | """Return bytes containing a full serialization of a nodemap | |
617 |
|
559 | |||
618 | The nodemap should be valid for the full set of revisions in the |
|
560 | The nodemap should be valid for the full set of revisions in the | |
619 | index.""" |
|
561 | index.""" | |
620 | return nodemaputil.persistent_data(self) |
|
562 | return nodemaputil.persistent_data(self) | |
621 |
|
563 | |||
622 | def nodemap_data_incremental(self): |
|
564 | def nodemap_data_incremental(self): | |
623 | """Return bytes containing a incremental update to persistent nodemap |
|
565 | """Return bytes containing a incremental update to persistent nodemap | |
624 |
|
566 | |||
625 | This containst the data for an append-only update of the data provided |
|
567 | This containst the data for an append-only update of the data provided | |
626 | in the last call to `update_nodemap_data`. |
|
568 | in the last call to `update_nodemap_data`. | |
627 | """ |
|
569 | """ | |
628 | if self._nm_root is None: |
|
570 | if self._nm_root is None: | |
629 | return None |
|
571 | return None | |
630 | docket = self._nm_docket |
|
572 | docket = self._nm_docket | |
631 | changed, data = nodemaputil.update_persistent_data( |
|
573 | changed, data = nodemaputil.update_persistent_data( | |
632 | self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev |
|
574 | self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev | |
633 | ) |
|
575 | ) | |
634 |
|
576 | |||
635 | self._nm_root = self._nm_max_idx = self._nm_docket = None |
|
577 | self._nm_root = self._nm_max_idx = self._nm_docket = None | |
636 | return docket, changed, data |
|
578 | return docket, changed, data | |
637 |
|
579 | |||
638 | def update_nodemap_data(self, docket, nm_data): |
|
580 | def update_nodemap_data(self, docket, nm_data): | |
639 | """provide full block of persisted binary data for a nodemap |
|
581 | """provide full block of persisted binary data for a nodemap | |
640 |
|
582 | |||
641 | The data are expected to come from disk. See `nodemap_data_all` for a |
|
583 | The data are expected to come from disk. See `nodemap_data_all` for a | |
642 | produceur of such data.""" |
|
584 | produceur of such data.""" | |
643 | if nm_data is not None: |
|
585 | if nm_data is not None: | |
644 | self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data) |
|
586 | self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data) | |
645 | if self._nm_root: |
|
587 | if self._nm_root: | |
646 | self._nm_docket = docket |
|
588 | self._nm_docket = docket | |
647 | else: |
|
589 | else: | |
648 | self._nm_root = self._nm_max_idx = self._nm_docket = None |
|
590 | self._nm_root = self._nm_max_idx = self._nm_docket = None | |
649 |
|
591 | |||
650 |
|
592 | |||
651 | class InlinedIndexObject(BaseIndexObject): |
|
593 | class InlinedIndexObject(BaseIndexObject): | |
652 | def __init__(self, data, inline=0): |
|
594 | def __init__(self, data, inline=0): | |
653 | self._data = data |
|
595 | self._data = data | |
654 | self._lgt = self._inline_scan(None) |
|
596 | self._lgt = self._inline_scan(None) | |
655 | self._inline_scan(self._lgt) |
|
597 | self._inline_scan(self._lgt) | |
656 | self._extra = [] |
|
598 | self._extra = [] | |
657 |
|
599 | |||
658 | def _inline_scan(self, lgt): |
|
600 | def _inline_scan(self, lgt): | |
659 | off = 0 |
|
601 | off = 0 | |
660 | if lgt is not None: |
|
602 | if lgt is not None: | |
661 | self._offsets = [0] * lgt |
|
603 | self._offsets = [0] * lgt | |
662 | count = 0 |
|
604 | count = 0 | |
663 | while off <= len(self._data) - self.entry_size: |
|
605 | while off <= len(self._data) - self.entry_size: | |
664 | start = off + self.big_int_size |
|
606 | start = off + self.big_int_size | |
665 | (s,) = struct.unpack( |
|
607 | (s,) = struct.unpack( | |
666 | b'>i', |
|
608 | b'>i', | |
667 | self._data[start : start + self.int_size], |
|
609 | self._data[start : start + self.int_size], | |
668 | ) |
|
610 | ) | |
669 | if lgt is not None: |
|
611 | if lgt is not None: | |
670 | self._offsets[count] = off |
|
612 | self._offsets[count] = off | |
671 | count += 1 |
|
613 | count += 1 | |
672 | off += self.entry_size + s |
|
614 | off += self.entry_size + s | |
673 | if off != len(self._data): |
|
615 | if off != len(self._data): | |
674 | raise ValueError(b"corrupted data") |
|
616 | raise ValueError(b"corrupted data") | |
675 | return count |
|
617 | return count | |
676 |
|
618 | |||
677 | def __delitem__(self, i): |
|
619 | def __delitem__(self, i): | |
678 | if not isinstance(i, slice) or not i.stop == -1 or i.step is not None: |
|
620 | if not isinstance(i, slice) or not i.stop == -1 or i.step is not None: | |
679 | raise ValueError(b"deleting slices only supports a:-1 with step 1") |
|
621 | raise ValueError(b"deleting slices only supports a:-1 with step 1") | |
680 | i = i.start |
|
622 | i = i.start | |
681 | self._check_index(i) |
|
623 | self._check_index(i) | |
682 | self._stripnodes(i) |
|
624 | self._stripnodes(i) | |
683 | if i < self._lgt: |
|
625 | if i < self._lgt: | |
684 | self._offsets = self._offsets[:i] |
|
626 | self._offsets = self._offsets[:i] | |
685 | self._lgt = i |
|
627 | self._lgt = i | |
686 | self._extra = [] |
|
628 | self._extra = [] | |
687 | else: |
|
629 | else: | |
688 | self._extra = self._extra[: i - self._lgt] |
|
630 | self._extra = self._extra[: i - self._lgt] | |
689 |
|
631 | |||
690 | def _calculate_index(self, i): |
|
632 | def _calculate_index(self, i): | |
691 | return self._offsets[i] |
|
633 | return self._offsets[i] | |
692 |
|
634 | |||
693 |
|
635 | |||
694 | def parse_index2(data, inline, revlogv2=False): |
|
636 | def parse_index2(data, inline, revlogv2=False): | |
695 | if not inline: |
|
637 | if not inline: | |
696 | cls = IndexObject2 if revlogv2 else IndexObject |
|
638 | cls = IndexObject2 if revlogv2 else IndexObject | |
697 | return cls(data), None |
|
639 | return cls(data), None | |
698 | cls = InlinedIndexObject |
|
640 | cls = InlinedIndexObject | |
699 | return cls(data, inline), (0, data) |
|
641 | return cls(data, inline), (0, data) | |
700 |
|
642 | |||
701 |
|
643 | |||
702 | def parse_index_cl_v2(data): |
|
644 | def parse_index_cl_v2(data): | |
703 | return IndexChangelogV2(data), None |
|
645 | return IndexChangelogV2(data), None | |
704 |
|
646 | |||
705 |
|
647 | |||
706 | class IndexObject2(IndexObject): |
|
648 | class IndexObject2(IndexObject): | |
707 | index_format = revlog_constants.INDEX_ENTRY_V2 |
|
649 | index_format = revlog_constants.INDEX_ENTRY_V2 | |
708 |
|
650 | |||
709 | def replace_sidedata_info( |
|
651 | def replace_sidedata_info( | |
710 | self, |
|
652 | self, | |
711 | rev, |
|
653 | rev, | |
712 | sidedata_offset, |
|
654 | sidedata_offset, | |
713 | sidedata_length, |
|
655 | sidedata_length, | |
714 | offset_flags, |
|
656 | offset_flags, | |
715 | compression_mode, |
|
657 | compression_mode, | |
716 | ): |
|
658 | ): | |
717 | """ |
|
659 | """ | |
718 | Replace an existing index entry's sidedata offset and length with new |
|
660 | Replace an existing index entry's sidedata offset and length with new | |
719 | ones. |
|
661 | ones. | |
720 | This cannot be used outside of the context of sidedata rewriting, |
|
662 | This cannot be used outside of the context of sidedata rewriting, | |
721 | inside the transaction that creates the revision `rev`. |
|
663 | inside the transaction that creates the revision `rev`. | |
722 | """ |
|
664 | """ | |
723 | if rev < 0: |
|
665 | if rev < 0: | |
724 | raise KeyError |
|
666 | raise KeyError | |
725 | self._check_index(rev) |
|
667 | self._check_index(rev) | |
726 | if rev < self._lgt: |
|
668 | if rev < self._lgt: | |
727 | msg = b"cannot rewrite entries outside of this transaction" |
|
669 | msg = b"cannot rewrite entries outside of this transaction" | |
728 | raise KeyError(msg) |
|
670 | raise KeyError(msg) | |
729 | else: |
|
671 | else: | |
730 | entry = list(self[rev]) |
|
672 | entry = list(self[rev]) | |
731 | entry[0] = offset_flags |
|
673 | entry[0] = offset_flags | |
732 | entry[8] = sidedata_offset |
|
674 | entry[8] = sidedata_offset | |
733 | entry[9] = sidedata_length |
|
675 | entry[9] = sidedata_length | |
734 | entry[11] = compression_mode |
|
676 | entry[11] = compression_mode | |
735 | entry = tuple(entry) |
|
677 | entry = tuple(entry) | |
736 | new = self._pack_entry(rev, entry) |
|
678 | new = self._pack_entry(rev, entry) | |
737 | self._extra[rev - self._lgt] = new |
|
679 | self._extra[rev - self._lgt] = new | |
738 |
|
680 | |||
739 | def _unpack_entry(self, rev, data): |
|
681 | def _unpack_entry(self, rev, data): | |
740 | data = self.index_format.unpack(data) |
|
682 | data = self.index_format.unpack(data) | |
741 | entry = data[:10] |
|
683 | entry = data[:10] | |
742 | data_comp = data[10] & 3 |
|
684 | data_comp = data[10] & 3 | |
743 | sidedata_comp = (data[10] & (3 << 2)) >> 2 |
|
685 | sidedata_comp = (data[10] & (3 << 2)) >> 2 | |
744 | return entry + (data_comp, sidedata_comp) |
|
686 | return entry + (data_comp, sidedata_comp) | |
745 |
|
687 | |||
746 | def _pack_entry(self, rev, entry): |
|
688 | def _pack_entry(self, rev, entry): | |
747 | data = entry[:10] |
|
689 | data = entry[:10] | |
748 | data_comp = entry[10] & 3 |
|
690 | data_comp = entry[10] & 3 | |
749 | sidedata_comp = (entry[11] & 3) << 2 |
|
691 | sidedata_comp = (entry[11] & 3) << 2 | |
750 | data += (data_comp | sidedata_comp,) |
|
692 | data += (data_comp | sidedata_comp,) | |
751 |
|
693 | |||
752 | return self.index_format.pack(*data) |
|
694 | return self.index_format.pack(*data) | |
753 |
|
695 | |||
754 | def entry_binary(self, rev): |
|
696 | def entry_binary(self, rev): | |
755 | """return the raw binary string representing a revision""" |
|
697 | """return the raw binary string representing a revision""" | |
756 | entry = self[rev] |
|
698 | entry = self[rev] | |
757 | return self._pack_entry(rev, entry) |
|
699 | return self._pack_entry(rev, entry) | |
758 |
|
700 | |||
759 | def pack_header(self, header): |
|
701 | def pack_header(self, header): | |
760 | """pack header information as binary""" |
|
702 | """pack header information as binary""" | |
761 | msg = 'version header should go in the docket, not the index: %d' |
|
703 | msg = 'version header should go in the docket, not the index: %d' | |
762 | msg %= header |
|
704 | msg %= header | |
763 | raise error.ProgrammingError(msg) |
|
705 | raise error.ProgrammingError(msg) | |
764 |
|
706 | |||
765 |
|
707 | |||
766 | class IndexChangelogV2(IndexObject2): |
|
708 | class IndexChangelogV2(IndexObject2): | |
767 | index_format = revlog_constants.INDEX_ENTRY_CL_V2 |
|
709 | index_format = revlog_constants.INDEX_ENTRY_CL_V2 | |
768 |
|
710 | |||
769 | def _unpack_entry(self, rev, data, r=True): |
|
711 | def _unpack_entry(self, rev, data, r=True): | |
770 | items = self.index_format.unpack(data) |
|
712 | items = self.index_format.unpack(data) | |
771 | entry = items[:3] + (rev, rev) + items[3:8] |
|
713 | entry = items[:3] + (rev, rev) + items[3:8] | |
772 | data_comp = items[8] & 3 |
|
714 | data_comp = items[8] & 3 | |
773 | sidedata_comp = (items[8] >> 2) & 3 |
|
715 | sidedata_comp = (items[8] >> 2) & 3 | |
774 | return entry + (data_comp, sidedata_comp) |
|
716 | return entry + (data_comp, sidedata_comp) | |
775 |
|
717 | |||
776 | def _pack_entry(self, rev, entry): |
|
718 | def _pack_entry(self, rev, entry): | |
777 | assert entry[3] == rev, entry[3] |
|
719 | assert entry[3] == rev, entry[3] | |
778 | assert entry[4] == rev, entry[4] |
|
720 | assert entry[4] == rev, entry[4] | |
779 | data = entry[:3] + entry[5:10] |
|
721 | data = entry[:3] + entry[5:10] | |
780 | data_comp = entry[10] & 3 |
|
722 | data_comp = entry[10] & 3 | |
781 | sidedata_comp = (entry[11] & 3) << 2 |
|
723 | sidedata_comp = (entry[11] & 3) << 2 | |
782 | data += (data_comp | sidedata_comp,) |
|
724 | data += (data_comp | sidedata_comp,) | |
783 | return self.index_format.pack(*data) |
|
725 | return self.index_format.pack(*data) | |
784 |
|
726 | |||
785 |
|
727 | |||
786 | def parse_index_devel_nodemap(data, inline): |
|
728 | def parse_index_devel_nodemap(data, inline): | |
787 | """like parse_index2, but alway return a PersistentNodeMapIndexObject""" |
|
729 | """like parse_index2, but alway return a PersistentNodeMapIndexObject""" | |
788 | return PersistentNodeMapIndexObject(data), None |
|
730 | return PersistentNodeMapIndexObject(data), None | |
789 |
|
731 | |||
790 |
|
732 | |||
791 | def parse_dirstate(dmap, copymap, st): |
|
733 | def parse_dirstate(dmap, copymap, st): | |
792 | parents = [st[:20], st[20:40]] |
|
734 | parents = [st[:20], st[20:40]] | |
793 | # dereference fields so they will be local in loop |
|
735 | # dereference fields so they will be local in loop | |
794 | format = b">cllll" |
|
736 | format = b">cllll" | |
795 | e_size = struct.calcsize(format) |
|
737 | e_size = struct.calcsize(format) | |
796 | pos1 = 40 |
|
738 | pos1 = 40 | |
797 | l = len(st) |
|
739 | l = len(st) | |
798 |
|
740 | |||
799 | # the inner loop |
|
741 | # the inner loop | |
800 | while pos1 < l: |
|
742 | while pos1 < l: | |
801 | pos2 = pos1 + e_size |
|
743 | pos2 = pos1 + e_size | |
802 | e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster |
|
744 | e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster | |
803 | pos1 = pos2 + e[4] |
|
745 | pos1 = pos2 + e[4] | |
804 | f = st[pos2:pos1] |
|
746 | f = st[pos2:pos1] | |
805 | if b'\0' in f: |
|
747 | if b'\0' in f: | |
806 | f, c = f.split(b'\0') |
|
748 | f, c = f.split(b'\0') | |
807 | copymap[f] = c |
|
749 | copymap[f] = c | |
808 | dmap[f] = DirstateItem.from_v1_data(*e[:4]) |
|
750 | dmap[f] = DirstateItem.from_v1_data(*e[:4]) | |
809 | return parents |
|
751 | return parents | |
810 |
|
752 | |||
811 |
|
753 | |||
812 | def pack_dirstate(dmap, copymap, pl, now): |
|
754 | def pack_dirstate(dmap, copymap, pl, now): | |
813 | now = int(now) |
|
755 | now = int(now) | |
814 | cs = stringio() |
|
756 | cs = stringio() | |
815 | write = cs.write |
|
757 | write = cs.write | |
816 | write(b"".join(pl)) |
|
758 | write(b"".join(pl)) | |
817 | for f, e in pycompat.iteritems(dmap): |
|
759 | for f, e in pycompat.iteritems(dmap): | |
818 | if e.need_delay(now): |
|
760 | if e.need_delay(now): | |
819 | # The file was last modified "simultaneously" with the current |
|
761 | # The file was last modified "simultaneously" with the current | |
820 | # write to dirstate (i.e. within the same second for file- |
|
762 | # write to dirstate (i.e. within the same second for file- | |
821 | # systems with a granularity of 1 sec). This commonly happens |
|
763 | # systems with a granularity of 1 sec). This commonly happens | |
822 | # for at least a couple of files on 'update'. |
|
764 | # for at least a couple of files on 'update'. | |
823 | # The user could change the file without changing its size |
|
765 | # The user could change the file without changing its size | |
824 | # within the same second. Invalidate the file's mtime in |
|
766 | # within the same second. Invalidate the file's mtime in | |
825 | # dirstate, forcing future 'status' calls to compare the |
|
767 | # dirstate, forcing future 'status' calls to compare the | |
826 | # contents of the file if the size is the same. This prevents |
|
768 | # contents of the file if the size is the same. This prevents | |
827 | # mistakenly treating such files as clean. |
|
769 | # mistakenly treating such files as clean. | |
828 | e.set_possibly_dirty() |
|
770 | e.set_possibly_dirty() | |
829 |
|
771 | |||
830 | if f in copymap: |
|
772 | if f in copymap: | |
831 | f = b"%s\0%s" % (f, copymap[f]) |
|
773 | f = b"%s\0%s" % (f, copymap[f]) | |
832 | e = _pack( |
|
774 | e = _pack( | |
833 | b">cllll", |
|
775 | b">cllll", | |
834 | e.v1_state(), |
|
776 | e.v1_state(), | |
835 | e.v1_mode(), |
|
777 | e.v1_mode(), | |
836 | e.v1_size(), |
|
778 | e.v1_size(), | |
837 | e.v1_mtime(), |
|
779 | e.v1_mtime(), | |
838 | len(f), |
|
780 | len(f), | |
839 | ) |
|
781 | ) | |
840 | write(e) |
|
782 | write(e) | |
841 | write(f) |
|
783 | write(f) | |
842 | return cs.getvalue() |
|
784 | return cs.getvalue() |
@@ -1,427 +1,413 | |||||
1 | use crate::errors::HgError; |
|
1 | use crate::errors::HgError; | |
2 | use bitflags::bitflags; |
|
2 | use bitflags::bitflags; | |
3 | use std::convert::TryFrom; |
|
3 | use std::convert::TryFrom; | |
4 |
|
4 | |||
5 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] |
|
5 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] | |
6 | pub enum EntryState { |
|
6 | pub enum EntryState { | |
7 | Normal, |
|
7 | Normal, | |
8 | Added, |
|
8 | Added, | |
9 | Removed, |
|
9 | Removed, | |
10 | Merged, |
|
10 | Merged, | |
11 | } |
|
11 | } | |
12 |
|
12 | |||
13 | /// The C implementation uses all signed types. This will be an issue |
|
13 | /// The C implementation uses all signed types. This will be an issue | |
14 | /// either when 4GB+ source files are commonplace or in 2038, whichever |
|
14 | /// either when 4GB+ source files are commonplace or in 2038, whichever | |
15 | /// comes first. |
|
15 | /// comes first. | |
16 | #[derive(Debug, PartialEq, Copy, Clone)] |
|
16 | #[derive(Debug, PartialEq, Copy, Clone)] | |
17 | pub struct DirstateEntry { |
|
17 | pub struct DirstateEntry { | |
18 | flags: Flags, |
|
18 | flags: Flags, | |
19 | mode: i32, |
|
19 | mode_size: Option<(i32, i32)>, | |
20 |
|
|
20 | mtime: Option<i32>, | |
21 | mtime: i32, |
|
|||
22 | } |
|
21 | } | |
23 |
|
22 | |||
24 | bitflags! { |
|
23 | bitflags! { | |
25 |
|
|
24 | struct Flags: u8 { | |
26 | const WDIR_TRACKED = 1 << 0; |
|
25 | const WDIR_TRACKED = 1 << 0; | |
27 | const P1_TRACKED = 1 << 1; |
|
26 | const P1_TRACKED = 1 << 1; | |
28 |
const P2_ |
|
27 | const P2_INFO = 1 << 2; | |
29 | const POSSIBLY_DIRTY = 1 << 3; |
|
|||
30 | const MERGED = 1 << 4; |
|
|||
31 | const CLEAN_P1 = 1 << 5; |
|
|||
32 | const CLEAN_P2 = 1 << 6; |
|
|||
33 | const ENTRYLESS_TREE_NODE = 1 << 7; |
|
|||
34 | } |
|
28 | } | |
35 | } |
|
29 | } | |
36 |
|
30 | |||
37 | pub const V1_RANGEMASK: i32 = 0x7FFFFFFF; |
|
31 | pub const V1_RANGEMASK: i32 = 0x7FFFFFFF; | |
38 |
|
32 | |||
39 | pub const MTIME_UNSET: i32 = -1; |
|
33 | pub const MTIME_UNSET: i32 = -1; | |
40 |
|
34 | |||
41 | /// A `DirstateEntry` with a size of `-2` means that it was merged from the |
|
35 | /// A `DirstateEntry` with a size of `-2` means that it was merged from the | |
42 | /// other parent. This allows revert to pick the right status back during a |
|
36 | /// other parent. This allows revert to pick the right status back during a | |
43 | /// merge. |
|
37 | /// merge. | |
44 | pub const SIZE_FROM_OTHER_PARENT: i32 = -2; |
|
38 | pub const SIZE_FROM_OTHER_PARENT: i32 = -2; | |
45 | /// A special value used for internal representation of special case in |
|
39 | /// A special value used for internal representation of special case in | |
46 | /// dirstate v1 format. |
|
40 | /// dirstate v1 format. | |
47 | pub const SIZE_NON_NORMAL: i32 = -1; |
|
41 | pub const SIZE_NON_NORMAL: i32 = -1; | |
48 |
|
42 | |||
49 | impl DirstateEntry { |
|
43 | impl DirstateEntry { | |
50 | pub fn new( |
|
44 | pub fn new( | |
51 | flags: Flags, |
|
45 | wdir_tracked: bool, | |
52 | mode_size_mtime: Option<(i32, i32, i32)>, |
|
46 | p1_tracked: bool, | |
|
47 | p2_info: bool, | |||
|
48 | mode_size: Option<(i32, i32)>, | |||
|
49 | mtime: Option<i32>, | |||
53 | ) -> Self { |
|
50 | ) -> Self { | |
54 | let (mode, size, mtime) = |
|
51 | let mut flags = Flags::empty(); | |
55 | mode_size_mtime.unwrap_or((0, SIZE_NON_NORMAL, MTIME_UNSET)); |
|
52 | flags.set(Flags::WDIR_TRACKED, wdir_tracked); | |
|
53 | flags.set(Flags::P1_TRACKED, p1_tracked); | |||
|
54 | flags.set(Flags::P2_INFO, p2_info); | |||
56 | Self { |
|
55 | Self { | |
57 | flags, |
|
56 | flags, | |
58 | mode, |
|
57 | mode_size, | |
59 | size, |
|
|||
60 | mtime, |
|
58 | mtime, | |
61 | } |
|
59 | } | |
62 | } |
|
60 | } | |
63 |
|
61 | |||
64 | pub fn from_v1_data( |
|
62 | pub fn from_v1_data( | |
65 | state: EntryState, |
|
63 | state: EntryState, | |
66 | mode: i32, |
|
64 | mode: i32, | |
67 | size: i32, |
|
65 | size: i32, | |
68 | mtime: i32, |
|
66 | mtime: i32, | |
69 | ) -> Self { |
|
67 | ) -> Self { | |
70 | match state { |
|
68 | match state { | |
71 | EntryState::Normal => { |
|
69 | EntryState::Normal => { | |
72 | if size == SIZE_FROM_OTHER_PARENT { |
|
70 | if size == SIZE_FROM_OTHER_PARENT { | |
73 | Self::new_from_p2() |
|
71 | Self::new_from_p2() | |
74 | } else if size == SIZE_NON_NORMAL { |
|
72 | } else if size == SIZE_NON_NORMAL { | |
75 | Self::new_possibly_dirty() |
|
73 | Self::new_possibly_dirty() | |
76 | } else if mtime == MTIME_UNSET { |
|
74 | } else if mtime == MTIME_UNSET { | |
77 | Self { |
|
75 | Self { | |
78 | flags: Flags::WDIR_TRACKED |
|
76 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, | |
79 | | Flags::P1_TRACKED |
|
77 | mode_size: Some((mode, size)), | |
80 | | Flags::POSSIBLY_DIRTY, |
|
78 | mtime: None, | |
81 | mode, |
|
|||
82 | size, |
|
|||
83 | mtime: 0, |
|
|||
84 | } |
|
79 | } | |
85 | } else { |
|
80 | } else { | |
86 | Self::new_normal(mode, size, mtime) |
|
81 | Self::new_normal(mode, size, mtime) | |
87 | } |
|
82 | } | |
88 | } |
|
83 | } | |
89 | EntryState::Added => Self::new_added(), |
|
84 | EntryState::Added => Self::new_added(), | |
90 | EntryState::Removed => Self { |
|
85 | EntryState::Removed => Self { | |
91 | flags: if size == SIZE_NON_NORMAL { |
|
86 | flags: if size == SIZE_NON_NORMAL { | |
92 |
Flags::P1_TRACKED |
|
87 | Flags::P1_TRACKED | Flags::P2_INFO | |
93 | | Flags::P2_TRACKED // might not be true because of rename ? |
|
|||
94 | | Flags::MERGED |
|
|||
95 | } else if size == SIZE_FROM_OTHER_PARENT { |
|
88 | } else if size == SIZE_FROM_OTHER_PARENT { | |
96 | // We don’t know if P1_TRACKED should be set (file history) |
|
89 | // We don’t know if P1_TRACKED should be set (file history) | |
97 |
Flags::P2_ |
|
90 | Flags::P2_INFO | |
98 | } else { |
|
91 | } else { | |
99 | Flags::P1_TRACKED |
|
92 | Flags::P1_TRACKED | |
100 | }, |
|
93 | }, | |
101 |
mode: |
|
94 | mode_size: None, | |
102 |
|
|
95 | mtime: None, | |
103 | mtime: 0, |
|
|||
104 | }, |
|
96 | }, | |
105 | EntryState::Merged => Self::new_merged(), |
|
97 | EntryState::Merged => Self::new_merged(), | |
106 | } |
|
98 | } | |
107 | } |
|
99 | } | |
108 |
|
100 | |||
109 | pub fn new_from_p2() -> Self { |
|
101 | pub fn new_from_p2() -> Self { | |
110 | Self { |
|
102 | Self { | |
111 | // might be missing P1_TRACKED |
|
103 | // might be missing P1_TRACKED | |
112 |
flags: Flags::WDIR_TRACKED | Flags::P2_ |
|
104 | flags: Flags::WDIR_TRACKED | Flags::P2_INFO, | |
113 |
mode: |
|
105 | mode_size: None, | |
114 | size: SIZE_FROM_OTHER_PARENT, |
|
106 | mtime: None, | |
115 | mtime: MTIME_UNSET, |
|
|||
116 | } |
|
107 | } | |
117 | } |
|
108 | } | |
118 |
|
109 | |||
119 | pub fn new_possibly_dirty() -> Self { |
|
110 | pub fn new_possibly_dirty() -> Self { | |
120 | Self { |
|
111 | Self { | |
121 | flags: Flags::WDIR_TRACKED |
|
112 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, | |
122 | | Flags::P1_TRACKED |
|
113 | mode_size: None, | |
123 | | Flags::POSSIBLY_DIRTY, |
|
114 | mtime: None, | |
124 | mode: 0, |
|
|||
125 | size: SIZE_NON_NORMAL, |
|
|||
126 | mtime: MTIME_UNSET, |
|
|||
127 | } |
|
115 | } | |
128 | } |
|
116 | } | |
129 |
|
117 | |||
130 | pub fn new_added() -> Self { |
|
118 | pub fn new_added() -> Self { | |
131 | Self { |
|
119 | Self { | |
132 | flags: Flags::WDIR_TRACKED, |
|
120 | flags: Flags::WDIR_TRACKED, | |
133 |
mode: |
|
121 | mode_size: None, | |
134 |
|
|
122 | mtime: None, | |
135 | mtime: MTIME_UNSET, |
|
|||
136 | } |
|
123 | } | |
137 | } |
|
124 | } | |
138 |
|
125 | |||
139 | pub fn new_merged() -> Self { |
|
126 | pub fn new_merged() -> Self { | |
140 | Self { |
|
127 | Self { | |
141 | flags: Flags::WDIR_TRACKED |
|
128 | flags: Flags::WDIR_TRACKED | |
142 | | Flags::P1_TRACKED // might not be true because of rename ? |
|
129 | | Flags::P1_TRACKED // might not be true because of rename ? | |
143 |
| Flags::P2_ |
|
130 | | Flags::P2_INFO, // might not be true because of rename ? | |
144 | | Flags::MERGED, |
|
131 | mode_size: None, | |
145 |
m |
|
132 | mtime: None, | |
146 | size: SIZE_NON_NORMAL, |
|
|||
147 | mtime: MTIME_UNSET, |
|
|||
148 | } |
|
133 | } | |
149 | } |
|
134 | } | |
150 |
|
135 | |||
151 | pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self { |
|
136 | pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self { | |
152 | Self { |
|
137 | Self { | |
153 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, |
|
138 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, | |
154 | mode, |
|
139 | mode_size: Some((mode, size)), | |
155 |
|
|
140 | mtime: Some(mtime), | |
156 | mtime, |
|
|||
157 | } |
|
141 | } | |
158 | } |
|
142 | } | |
159 |
|
143 | |||
160 | /// Creates a new entry in "removed" state. |
|
144 | /// Creates a new entry in "removed" state. | |
161 | /// |
|
145 | /// | |
162 | /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or |
|
146 | /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or | |
163 | /// `SIZE_FROM_OTHER_PARENT` |
|
147 | /// `SIZE_FROM_OTHER_PARENT` | |
164 | pub fn new_removed(size: i32) -> Self { |
|
148 | pub fn new_removed(size: i32) -> Self { | |
165 | Self::from_v1_data(EntryState::Removed, 0, size, 0) |
|
149 | Self::from_v1_data(EntryState::Removed, 0, size, 0) | |
166 | } |
|
150 | } | |
167 |
|
151 | |||
168 | pub fn tracked(&self) -> bool { |
|
152 | pub fn tracked(&self) -> bool { | |
169 | self.flags.contains(Flags::WDIR_TRACKED) |
|
153 | self.flags.contains(Flags::WDIR_TRACKED) | |
170 | } |
|
154 | } | |
171 |
|
155 | |||
172 |
fn |
|
156 | fn in_either_parent(&self) -> bool { | |
173 |
self.flags.intersects(Flags::P1_TRACKED | Flags::P2_ |
|
157 | self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO) | |
174 | } |
|
158 | } | |
175 |
|
159 | |||
176 | pub fn removed(&self) -> bool { |
|
160 | pub fn removed(&self) -> bool { | |
177 | self.tracked_in_any_parent() |
|
161 | self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED) | |
178 | && !self.flags.contains(Flags::WDIR_TRACKED) |
|
|||
179 | } |
|
162 | } | |
180 |
|
163 | |||
181 | pub fn merged(&self) -> bool { |
|
164 | pub fn merged(&self) -> bool { | |
182 | self.flags.contains(Flags::WDIR_TRACKED | Flags::MERGED) |
|
165 | self.flags | |
|
166 | .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO) | |||
183 | } |
|
167 | } | |
184 |
|
168 | |||
185 | pub fn added(&self) -> bool { |
|
169 | pub fn added(&self) -> bool { | |
186 | self.flags.contains(Flags::WDIR_TRACKED) |
|
170 | self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent() | |
187 | && !self.tracked_in_any_parent() |
|
|||
188 | } |
|
171 | } | |
189 |
|
172 | |||
190 | pub fn from_p2(&self) -> bool { |
|
173 | pub fn from_p2(&self) -> bool { | |
191 |
self.flags.contains(Flags::WDIR_TRACKED | Flags:: |
|
174 | self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO) | |
|
175 | && !self.flags.contains(Flags::P1_TRACKED) | |||
192 | } |
|
176 | } | |
193 |
|
177 | |||
194 | pub fn maybe_clean(&self) -> bool { |
|
178 | pub fn maybe_clean(&self) -> bool { | |
195 | if !self.flags.contains(Flags::WDIR_TRACKED) { |
|
179 | if !self.flags.contains(Flags::WDIR_TRACKED) { | |
196 | false |
|
180 | false | |
197 |
} else if self. |
|
181 | } else if !self.flags.contains(Flags::P1_TRACKED) { | |
198 | false |
|
182 | false | |
199 |
} else if self.flags.contains(Flags:: |
|
183 | } else if self.flags.contains(Flags::P2_INFO) { | |
200 | false |
|
|||
201 | } else if self.flags.contains(Flags::CLEAN_P2) { |
|
|||
202 | false |
|
184 | false | |
203 | } else { |
|
185 | } else { | |
204 | true |
|
186 | true | |
205 | } |
|
187 | } | |
206 | } |
|
188 | } | |
207 |
|
189 | |||
208 | pub fn any_tracked(&self) -> bool { |
|
190 | pub fn any_tracked(&self) -> bool { | |
209 | self.flags.intersects( |
|
191 | self.flags.intersects( | |
210 |
Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_ |
|
192 | Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO, | |
211 | ) |
|
193 | ) | |
212 | } |
|
194 | } | |
213 |
|
195 | |||
214 |
|
|
196 | fn v1_state(&self) -> EntryState { | |
|
197 | if !self.any_tracked() { | |||
|
198 | // TODO: return an Option instead? | |||
|
199 | panic!("Accessing v1_state of an untracked DirstateEntry") | |||
|
200 | } | |||
215 | if self.removed() { |
|
201 | if self.removed() { | |
216 | EntryState::Removed |
|
202 | EntryState::Removed | |
217 | } else if self.merged() { |
|
203 | } else if self.merged() { | |
218 | EntryState::Merged |
|
204 | EntryState::Merged | |
219 | } else if self.added() { |
|
205 | } else if self.added() { | |
220 | EntryState::Added |
|
206 | EntryState::Added | |
221 | } else { |
|
207 | } else { | |
222 | EntryState::Normal |
|
208 | EntryState::Normal | |
223 | } |
|
209 | } | |
224 | } |
|
210 | } | |
225 |
|
211 | |||
226 |
|
|
212 | fn v1_mode(&self) -> i32 { | |
227 | self.mode |
|
213 | if let Some((mode, _size)) = self.mode_size { | |
|
214 | mode | |||
|
215 | } else { | |||
|
216 | 0 | |||
|
217 | } | |||
228 | } |
|
218 | } | |
229 |
|
219 | |||
230 |
|
|
220 | fn v1_size(&self) -> i32 { | |
231 | if self.removed() && self.flags.contains(Flags::MERGED) { |
|
221 | if !self.any_tracked() { | |
|
222 | // TODO: return an Option instead? | |||
|
223 | panic!("Accessing v1_size of an untracked DirstateEntry") | |||
|
224 | } | |||
|
225 | if self.removed() | |||
|
226 | && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO) | |||
|
227 | { | |||
232 | SIZE_NON_NORMAL |
|
228 | SIZE_NON_NORMAL | |
233 |
} else if self.removed() && self.flags.contains(Flags:: |
|
229 | } else if self.removed() && self.flags.contains(Flags::P2_INFO) { | |
234 | SIZE_FROM_OTHER_PARENT |
|
230 | SIZE_FROM_OTHER_PARENT | |
235 | } else if self.removed() { |
|
231 | } else if self.removed() { | |
236 | 0 |
|
232 | 0 | |
237 | } else if self.merged() { |
|
233 | } else if self.merged() { | |
238 | SIZE_FROM_OTHER_PARENT |
|
234 | SIZE_FROM_OTHER_PARENT | |
239 | } else if self.added() { |
|
235 | } else if self.added() { | |
240 | SIZE_NON_NORMAL |
|
236 | SIZE_NON_NORMAL | |
241 | } else if self.from_p2() { |
|
237 | } else if self.from_p2() { | |
242 | SIZE_FROM_OTHER_PARENT |
|
238 | SIZE_FROM_OTHER_PARENT | |
243 | } else if self.flags.contains(Flags::POSSIBLY_DIRTY) { |
|
239 | } else if let Some((_mode, size)) = self.mode_size { | |
244 | self.size // TODO: SIZE_NON_NORMAL ? |
|
240 | size | |
245 | } else { |
|
241 | } else { | |
246 | self.size |
|
242 | SIZE_NON_NORMAL | |
247 | } |
|
243 | } | |
248 | } |
|
244 | } | |
249 |
|
245 | |||
250 |
|
|
246 | fn v1_mtime(&self) -> i32 { | |
|
247 | if !self.any_tracked() { | |||
|
248 | // TODO: return an Option instead? | |||
|
249 | panic!("Accessing v1_mtime of an untracked DirstateEntry") | |||
|
250 | } | |||
251 | if self.removed() { |
|
251 | if self.removed() { | |
252 | 0 |
|
252 | 0 | |
253 |
} else if self.flags.contains(Flags::P |
|
253 | } else if self.flags.contains(Flags::P2_INFO) { | |
254 | MTIME_UNSET |
|
|||
255 | } else if self.merged() { |
|
|||
256 | MTIME_UNSET |
|
254 | MTIME_UNSET | |
257 |
} else if self. |
|
255 | } else if !self.flags.contains(Flags::P1_TRACKED) { | |
258 | MTIME_UNSET |
|
|||
259 | } else if self.from_p2() { |
|
|||
260 | MTIME_UNSET |
|
256 | MTIME_UNSET | |
261 | } else { |
|
257 | } else { | |
262 | self.mtime |
|
258 | self.mtime.unwrap_or(MTIME_UNSET) | |
|
259 | } | |||
|
260 | } | |||
|
261 | ||||
|
262 | // TODO: return `Option<EntryState>`? None when `!self.any_tracked` | |||
|
263 | pub fn state(&self) -> EntryState { | |||
|
264 | self.v1_state() | |||
263 |
|
|
265 | } | |
|
266 | ||||
|
267 | // TODO: return Option? | |||
|
268 | pub fn mode(&self) -> i32 { | |||
|
269 | self.v1_mode() | |||
|
270 | } | |||
|
271 | ||||
|
272 | // TODO: return Option? | |||
|
273 | pub fn size(&self) -> i32 { | |||
|
274 | self.v1_size() | |||
|
275 | } | |||
|
276 | ||||
|
277 | // TODO: return Option? | |||
|
278 | pub fn mtime(&self) -> i32 { | |||
|
279 | self.v1_mtime() | |||
264 | } |
|
280 | } | |
265 |
|
281 | |||
266 | pub fn drop_merge_data(&mut self) { |
|
282 | pub fn drop_merge_data(&mut self) { | |
267 |
if self.flags.contains(Flags:: |
|
283 | if self.flags.contains(Flags::P2_INFO) { | |
268 |
|
|
284 | self.flags.remove(Flags::P2_INFO); | |
269 | || self.flags.contains(Flags::MERGED) |
|
285 | self.mode_size = None; | |
270 | || self.flags.contains(Flags::P2_TRACKED) |
|
286 | self.mtime = None; | |
271 | { |
|
|||
272 | if self.flags.contains(Flags::MERGED) { |
|
|||
273 | self.flags.insert(Flags::P1_TRACKED); |
|
|||
274 | } else { |
|
|||
275 | self.flags.remove(Flags::P1_TRACKED); |
|
|||
276 | } |
|
|||
277 | self.flags.remove( |
|
|||
278 | Flags::MERGED |
|
|||
279 | | Flags::CLEAN_P1 |
|
|||
280 | | Flags::CLEAN_P2 |
|
|||
281 | | Flags::P2_TRACKED, |
|
|||
282 | ); |
|
|||
283 | self.flags.insert(Flags::POSSIBLY_DIRTY); |
|
|||
284 | self.mode = 0; |
|
|||
285 | self.mtime = 0; |
|
|||
286 | // size = None on the python size turn into size = NON_NORMAL when |
|
|||
287 | // accessed. So the next line is currently required, but a some |
|
|||
288 | // future clean up would be welcome. |
|
|||
289 | self.size = SIZE_NON_NORMAL; |
|
|||
290 | } |
|
287 | } | |
291 | } |
|
288 | } | |
292 |
|
289 | |||
293 | pub fn set_possibly_dirty(&mut self) { |
|
290 | pub fn set_possibly_dirty(&mut self) { | |
294 | self.flags.insert(Flags::POSSIBLY_DIRTY) |
|
291 | self.mtime = None | |
295 | } |
|
292 | } | |
296 |
|
293 | |||
297 | pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) { |
|
294 | pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) { | |
298 | self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED); |
|
295 | self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED); | |
299 | self.flags.remove( |
|
296 | self.mode_size = Some((mode, size)); | |
300 | Flags::P2_TRACKED // This might be wrong |
|
297 | self.mtime = Some(mtime); | |
301 | | Flags::MERGED |
|
|||
302 | | Flags::CLEAN_P2 |
|
|||
303 | | Flags::POSSIBLY_DIRTY, |
|
|||
304 | ); |
|
|||
305 | self.mode = mode; |
|
|||
306 | self.size = size; |
|
|||
307 | self.mtime = mtime; |
|
|||
308 | } |
|
298 | } | |
309 |
|
299 | |||
310 | pub fn set_tracked(&mut self) { |
|
300 | pub fn set_tracked(&mut self) { | |
311 | self.flags |
|
301 | self.flags.insert(Flags::WDIR_TRACKED); | |
312 | .insert(Flags::WDIR_TRACKED | Flags::POSSIBLY_DIRTY); |
|
302 | // `set_tracked` is replacing various `normallookup` call. So we mark | |
313 | // size = None on the python size turn into size = NON_NORMAL when |
|
303 | // the files as needing lookup | |
314 | // accessed. So the next line is currently required, but a some future |
|
304 | // | |
315 | // clean up would be welcome. |
|
305 | // Consider dropping this in the future in favor of something less | |
316 | self.size = SIZE_NON_NORMAL; |
|
306 | // broad. | |
|
307 | self.mtime = None; | |||
317 | } |
|
308 | } | |
318 |
|
309 | |||
319 | pub fn set_untracked(&mut self) { |
|
310 | pub fn set_untracked(&mut self) { | |
320 | self.flags.remove(Flags::WDIR_TRACKED); |
|
311 | self.flags.remove(Flags::WDIR_TRACKED); | |
321 |
self.mode = |
|
312 | self.mode_size = None; | |
322 |
self. |
|
313 | self.mtime = None; | |
323 | self.mtime = 0; |
|
|||
324 | } |
|
314 | } | |
325 |
|
315 | |||
326 | /// Returns `(state, mode, size, mtime)` for the puprose of serialization |
|
316 | /// Returns `(state, mode, size, mtime)` for the puprose of serialization | |
327 | /// in the dirstate-v1 format. |
|
317 | /// in the dirstate-v1 format. | |
328 | /// |
|
318 | /// | |
329 | /// This includes marker values such as `mtime == -1`. In the future we may |
|
319 | /// This includes marker values such as `mtime == -1`. In the future we may | |
330 | /// want to not represent these cases that way in memory, but serialization |
|
320 | /// want to not represent these cases that way in memory, but serialization | |
331 | /// will need to keep the same format. |
|
321 | /// will need to keep the same format. | |
332 | pub fn v1_data(&self) -> (u8, i32, i32, i32) { |
|
322 | pub fn v1_data(&self) -> (u8, i32, i32, i32) { | |
333 | (self.state().into(), self.mode(), self.size(), self.mtime()) |
|
323 | ( | |
|
324 | self.v1_state().into(), | |||
|
325 | self.v1_mode(), | |||
|
326 | self.v1_size(), | |||
|
327 | self.v1_mtime(), | |||
|
328 | ) | |||
334 | } |
|
329 | } | |
335 |
|
330 | |||
336 | pub(crate) fn is_from_other_parent(&self) -> bool { |
|
331 | pub(crate) fn is_from_other_parent(&self) -> bool { | |
337 | self.state() == EntryState::Normal |
|
332 | self.state() == EntryState::Normal | |
338 | && self.size() == SIZE_FROM_OTHER_PARENT |
|
333 | && self.size() == SIZE_FROM_OTHER_PARENT | |
339 | } |
|
334 | } | |
340 |
|
335 | |||
341 | // TODO: other platforms |
|
336 | // TODO: other platforms | |
342 | #[cfg(unix)] |
|
337 | #[cfg(unix)] | |
343 | pub fn mode_changed( |
|
338 | pub fn mode_changed( | |
344 | &self, |
|
339 | &self, | |
345 | filesystem_metadata: &std::fs::Metadata, |
|
340 | filesystem_metadata: &std::fs::Metadata, | |
346 | ) -> bool { |
|
341 | ) -> bool { | |
347 | use std::os::unix::fs::MetadataExt; |
|
342 | use std::os::unix::fs::MetadataExt; | |
348 | const EXEC_BIT_MASK: u32 = 0o100; |
|
343 | const EXEC_BIT_MASK: u32 = 0o100; | |
349 | let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK; |
|
344 | let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK; | |
350 | let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK; |
|
345 | let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK; | |
351 | dirstate_exec_bit != fs_exec_bit |
|
346 | dirstate_exec_bit != fs_exec_bit | |
352 | } |
|
347 | } | |
353 |
|
348 | |||
354 | /// Returns a `(state, mode, size, mtime)` tuple as for |
|
349 | /// Returns a `(state, mode, size, mtime)` tuple as for | |
355 | /// `DirstateMapMethods::debug_iter`. |
|
350 | /// `DirstateMapMethods::debug_iter`. | |
356 | pub fn debug_tuple(&self) -> (u8, i32, i32, i32) { |
|
351 | pub fn debug_tuple(&self) -> (u8, i32, i32, i32) { | |
357 | let state = if self.flags.contains(Flags::ENTRYLESS_TREE_NODE) { |
|
352 | (self.state().into(), self.mode(), self.size(), self.mtime()) | |
358 | b' ' |
|
|||
359 | } else { |
|
|||
360 | self.state().into() |
|
|||
361 | }; |
|
|||
362 | (state, self.mode(), self.size(), self.mtime()) |
|
|||
363 | } |
|
353 | } | |
364 |
|
354 | |||
365 | pub fn mtime_is_ambiguous(&self, now: i32) -> bool { |
|
355 | pub fn mtime_is_ambiguous(&self, now: i32) -> bool { | |
366 | self.state() == EntryState::Normal && self.mtime() == now |
|
356 | self.state() == EntryState::Normal && self.mtime() == now | |
367 | } |
|
357 | } | |
368 |
|
358 | |||
369 | pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool { |
|
359 | pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool { | |
370 | let ambiguous = self.mtime_is_ambiguous(now); |
|
360 | let ambiguous = self.mtime_is_ambiguous(now); | |
371 | if ambiguous { |
|
361 | if ambiguous { | |
372 | // The file was last modified "simultaneously" with the current |
|
362 | // The file was last modified "simultaneously" with the current | |
373 | // write to dirstate (i.e. within the same second for file- |
|
363 | // write to dirstate (i.e. within the same second for file- | |
374 | // systems with a granularity of 1 sec). This commonly happens |
|
364 | // systems with a granularity of 1 sec). This commonly happens | |
375 | // for at least a couple of files on 'update'. |
|
365 | // for at least a couple of files on 'update'. | |
376 | // The user could change the file without changing its size |
|
366 | // The user could change the file without changing its size | |
377 | // within the same second. Invalidate the file's mtime in |
|
367 | // within the same second. Invalidate the file's mtime in | |
378 | // dirstate, forcing future 'status' calls to compare the |
|
368 | // dirstate, forcing future 'status' calls to compare the | |
379 | // contents of the file if the size is the same. This prevents |
|
369 | // contents of the file if the size is the same. This prevents | |
380 | // mistakenly treating such files as clean. |
|
370 | // mistakenly treating such files as clean. | |
381 |
self. |
|
371 | self.set_possibly_dirty() | |
382 | } |
|
372 | } | |
383 | ambiguous |
|
373 | ambiguous | |
384 | } |
|
374 | } | |
385 |
|
||||
386 | pub fn clear_mtime(&mut self) { |
|
|||
387 | self.mtime = -1; |
|
|||
388 | } |
|
|||
389 | } |
|
375 | } | |
390 |
|
376 | |||
391 | impl EntryState { |
|
377 | impl EntryState { | |
392 | pub fn is_tracked(self) -> bool { |
|
378 | pub fn is_tracked(self) -> bool { | |
393 | use EntryState::*; |
|
379 | use EntryState::*; | |
394 | match self { |
|
380 | match self { | |
395 | Normal | Added | Merged => true, |
|
381 | Normal | Added | Merged => true, | |
396 | Removed => false, |
|
382 | Removed => false, | |
397 | } |
|
383 | } | |
398 | } |
|
384 | } | |
399 | } |
|
385 | } | |
400 |
|
386 | |||
401 | impl TryFrom<u8> for EntryState { |
|
387 | impl TryFrom<u8> for EntryState { | |
402 | type Error = HgError; |
|
388 | type Error = HgError; | |
403 |
|
389 | |||
404 | fn try_from(value: u8) -> Result<Self, Self::Error> { |
|
390 | fn try_from(value: u8) -> Result<Self, Self::Error> { | |
405 | match value { |
|
391 | match value { | |
406 | b'n' => Ok(EntryState::Normal), |
|
392 | b'n' => Ok(EntryState::Normal), | |
407 | b'a' => Ok(EntryState::Added), |
|
393 | b'a' => Ok(EntryState::Added), | |
408 | b'r' => Ok(EntryState::Removed), |
|
394 | b'r' => Ok(EntryState::Removed), | |
409 | b'm' => Ok(EntryState::Merged), |
|
395 | b'm' => Ok(EntryState::Merged), | |
410 | _ => Err(HgError::CorruptedRepository(format!( |
|
396 | _ => Err(HgError::CorruptedRepository(format!( | |
411 | "Incorrect dirstate entry state {}", |
|
397 | "Incorrect dirstate entry state {}", | |
412 | value |
|
398 | value | |
413 | ))), |
|
399 | ))), | |
414 | } |
|
400 | } | |
415 | } |
|
401 | } | |
416 | } |
|
402 | } | |
417 |
|
403 | |||
418 | impl Into<u8> for EntryState { |
|
404 | impl Into<u8> for EntryState { | |
419 | fn into(self) -> u8 { |
|
405 | fn into(self) -> u8 { | |
420 | match self { |
|
406 | match self { | |
421 | EntryState::Normal => b'n', |
|
407 | EntryState::Normal => b'n', | |
422 | EntryState::Added => b'a', |
|
408 | EntryState::Added => b'a', | |
423 | EntryState::Removed => b'r', |
|
409 | EntryState::Removed => b'r', | |
424 | EntryState::Merged => b'm', |
|
410 | EntryState::Merged => b'm', | |
425 | } |
|
411 | } | |
426 | } |
|
412 | } | |
427 | } |
|
413 | } |
@@ -1,1193 +1,1193 | |||||
1 | use bytes_cast::BytesCast; |
|
1 | use bytes_cast::BytesCast; | |
2 | use micro_timer::timed; |
|
2 | use micro_timer::timed; | |
3 | use std::borrow::Cow; |
|
3 | use std::borrow::Cow; | |
4 | use std::convert::TryInto; |
|
4 | use std::convert::TryInto; | |
5 | use std::path::PathBuf; |
|
5 | use std::path::PathBuf; | |
6 |
|
6 | |||
7 | use super::on_disk; |
|
7 | use super::on_disk; | |
8 | use super::on_disk::DirstateV2ParseError; |
|
8 | use super::on_disk::DirstateV2ParseError; | |
9 | use super::owning::OwningDirstateMap; |
|
9 | use super::owning::OwningDirstateMap; | |
10 | use super::path_with_basename::WithBasename; |
|
10 | use super::path_with_basename::WithBasename; | |
11 | use crate::dirstate::parsers::pack_entry; |
|
11 | use crate::dirstate::parsers::pack_entry; | |
12 | use crate::dirstate::parsers::packed_entry_size; |
|
12 | use crate::dirstate::parsers::packed_entry_size; | |
13 | use crate::dirstate::parsers::parse_dirstate_entries; |
|
13 | use crate::dirstate::parsers::parse_dirstate_entries; | |
14 | use crate::dirstate::parsers::Timestamp; |
|
14 | use crate::dirstate::parsers::Timestamp; | |
15 | use crate::dirstate::CopyMapIter; |
|
15 | use crate::dirstate::CopyMapIter; | |
16 | use crate::dirstate::StateMapIter; |
|
16 | use crate::dirstate::StateMapIter; | |
17 | use crate::dirstate::SIZE_FROM_OTHER_PARENT; |
|
17 | use crate::dirstate::SIZE_FROM_OTHER_PARENT; | |
18 | use crate::dirstate::SIZE_NON_NORMAL; |
|
18 | use crate::dirstate::SIZE_NON_NORMAL; | |
19 | use crate::matchers::Matcher; |
|
19 | use crate::matchers::Matcher; | |
20 | use crate::utils::hg_path::{HgPath, HgPathBuf}; |
|
20 | use crate::utils::hg_path::{HgPath, HgPathBuf}; | |
21 | use crate::DirstateEntry; |
|
21 | use crate::DirstateEntry; | |
22 | use crate::DirstateError; |
|
22 | use crate::DirstateError; | |
23 | use crate::DirstateParents; |
|
23 | use crate::DirstateParents; | |
24 | use crate::DirstateStatus; |
|
24 | use crate::DirstateStatus; | |
25 | use crate::EntryState; |
|
25 | use crate::EntryState; | |
26 | use crate::FastHashMap; |
|
26 | use crate::FastHashMap; | |
27 | use crate::PatternFileWarning; |
|
27 | use crate::PatternFileWarning; | |
28 | use crate::StatusError; |
|
28 | use crate::StatusError; | |
29 | use crate::StatusOptions; |
|
29 | use crate::StatusOptions; | |
30 |
|
30 | |||
31 | /// Append to an existing data file if the amount of unreachable data (not used |
|
31 | /// Append to an existing data file if the amount of unreachable data (not used | |
32 | /// anymore) is less than this fraction of the total amount of existing data. |
|
32 | /// anymore) is less than this fraction of the total amount of existing data. | |
33 | const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5; |
|
33 | const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5; | |
34 |
|
34 | |||
35 | pub struct DirstateMap<'on_disk> { |
|
35 | pub struct DirstateMap<'on_disk> { | |
36 | /// Contents of the `.hg/dirstate` file |
|
36 | /// Contents of the `.hg/dirstate` file | |
37 | pub(super) on_disk: &'on_disk [u8], |
|
37 | pub(super) on_disk: &'on_disk [u8], | |
38 |
|
38 | |||
39 | pub(super) root: ChildNodes<'on_disk>, |
|
39 | pub(super) root: ChildNodes<'on_disk>, | |
40 |
|
40 | |||
41 | /// Number of nodes anywhere in the tree that have `.entry.is_some()`. |
|
41 | /// Number of nodes anywhere in the tree that have `.entry.is_some()`. | |
42 | pub(super) nodes_with_entry_count: u32, |
|
42 | pub(super) nodes_with_entry_count: u32, | |
43 |
|
43 | |||
44 | /// Number of nodes anywhere in the tree that have |
|
44 | /// Number of nodes anywhere in the tree that have | |
45 | /// `.copy_source.is_some()`. |
|
45 | /// `.copy_source.is_some()`. | |
46 | pub(super) nodes_with_copy_source_count: u32, |
|
46 | pub(super) nodes_with_copy_source_count: u32, | |
47 |
|
47 | |||
48 | /// See on_disk::Header |
|
48 | /// See on_disk::Header | |
49 | pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash, |
|
49 | pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash, | |
50 |
|
50 | |||
51 | /// How many bytes of `on_disk` are not used anymore |
|
51 | /// How many bytes of `on_disk` are not used anymore | |
52 | pub(super) unreachable_bytes: u32, |
|
52 | pub(super) unreachable_bytes: u32, | |
53 | } |
|
53 | } | |
54 |
|
54 | |||
55 | /// Using a plain `HgPathBuf` of the full path from the repository root as a |
|
55 | /// Using a plain `HgPathBuf` of the full path from the repository root as a | |
56 | /// map key would also work: all paths in a given map have the same parent |
|
56 | /// map key would also work: all paths in a given map have the same parent | |
57 | /// path, so comparing full paths gives the same result as comparing base |
|
57 | /// path, so comparing full paths gives the same result as comparing base | |
58 | /// names. However `HashMap` would waste time always re-hashing the same |
|
58 | /// names. However `HashMap` would waste time always re-hashing the same | |
59 | /// string prefix. |
|
59 | /// string prefix. | |
60 | pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>; |
|
60 | pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>; | |
61 |
|
61 | |||
62 | /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned |
|
62 | /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned | |
63 | /// for on-disk nodes that don’t actually have a `Cow` to borrow. |
|
63 | /// for on-disk nodes that don’t actually have a `Cow` to borrow. | |
64 | pub(super) enum BorrowedPath<'tree, 'on_disk> { |
|
64 | pub(super) enum BorrowedPath<'tree, 'on_disk> { | |
65 | InMemory(&'tree HgPathBuf), |
|
65 | InMemory(&'tree HgPathBuf), | |
66 | OnDisk(&'on_disk HgPath), |
|
66 | OnDisk(&'on_disk HgPath), | |
67 | } |
|
67 | } | |
68 |
|
68 | |||
69 | pub(super) enum ChildNodes<'on_disk> { |
|
69 | pub(super) enum ChildNodes<'on_disk> { | |
70 | InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>), |
|
70 | InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>), | |
71 | OnDisk(&'on_disk [on_disk::Node]), |
|
71 | OnDisk(&'on_disk [on_disk::Node]), | |
72 | } |
|
72 | } | |
73 |
|
73 | |||
74 | pub(super) enum ChildNodesRef<'tree, 'on_disk> { |
|
74 | pub(super) enum ChildNodesRef<'tree, 'on_disk> { | |
75 | InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>), |
|
75 | InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>), | |
76 | OnDisk(&'on_disk [on_disk::Node]), |
|
76 | OnDisk(&'on_disk [on_disk::Node]), | |
77 | } |
|
77 | } | |
78 |
|
78 | |||
79 | pub(super) enum NodeRef<'tree, 'on_disk> { |
|
79 | pub(super) enum NodeRef<'tree, 'on_disk> { | |
80 | InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>), |
|
80 | InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>), | |
81 | OnDisk(&'on_disk on_disk::Node), |
|
81 | OnDisk(&'on_disk on_disk::Node), | |
82 | } |
|
82 | } | |
83 |
|
83 | |||
84 | impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> { |
|
84 | impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> { | |
85 | pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> { |
|
85 | pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> { | |
86 | match *self { |
|
86 | match *self { | |
87 | BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()), |
|
87 | BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()), | |
88 | BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk), |
|
88 | BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk), | |
89 | } |
|
89 | } | |
90 | } |
|
90 | } | |
91 | } |
|
91 | } | |
92 |
|
92 | |||
93 | impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> { |
|
93 | impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> { | |
94 | type Target = HgPath; |
|
94 | type Target = HgPath; | |
95 |
|
95 | |||
96 | fn deref(&self) -> &HgPath { |
|
96 | fn deref(&self) -> &HgPath { | |
97 | match *self { |
|
97 | match *self { | |
98 | BorrowedPath::InMemory(in_memory) => in_memory, |
|
98 | BorrowedPath::InMemory(in_memory) => in_memory, | |
99 | BorrowedPath::OnDisk(on_disk) => on_disk, |
|
99 | BorrowedPath::OnDisk(on_disk) => on_disk, | |
100 | } |
|
100 | } | |
101 | } |
|
101 | } | |
102 | } |
|
102 | } | |
103 |
|
103 | |||
104 | impl Default for ChildNodes<'_> { |
|
104 | impl Default for ChildNodes<'_> { | |
105 | fn default() -> Self { |
|
105 | fn default() -> Self { | |
106 | ChildNodes::InMemory(Default::default()) |
|
106 | ChildNodes::InMemory(Default::default()) | |
107 | } |
|
107 | } | |
108 | } |
|
108 | } | |
109 |
|
109 | |||
110 | impl<'on_disk> ChildNodes<'on_disk> { |
|
110 | impl<'on_disk> ChildNodes<'on_disk> { | |
111 | pub(super) fn as_ref<'tree>( |
|
111 | pub(super) fn as_ref<'tree>( | |
112 | &'tree self, |
|
112 | &'tree self, | |
113 | ) -> ChildNodesRef<'tree, 'on_disk> { |
|
113 | ) -> ChildNodesRef<'tree, 'on_disk> { | |
114 | match self { |
|
114 | match self { | |
115 | ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes), |
|
115 | ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes), | |
116 | ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes), |
|
116 | ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes), | |
117 | } |
|
117 | } | |
118 | } |
|
118 | } | |
119 |
|
119 | |||
120 | pub(super) fn is_empty(&self) -> bool { |
|
120 | pub(super) fn is_empty(&self) -> bool { | |
121 | match self { |
|
121 | match self { | |
122 | ChildNodes::InMemory(nodes) => nodes.is_empty(), |
|
122 | ChildNodes::InMemory(nodes) => nodes.is_empty(), | |
123 | ChildNodes::OnDisk(nodes) => nodes.is_empty(), |
|
123 | ChildNodes::OnDisk(nodes) => nodes.is_empty(), | |
124 | } |
|
124 | } | |
125 | } |
|
125 | } | |
126 |
|
126 | |||
127 | fn make_mut( |
|
127 | fn make_mut( | |
128 | &mut self, |
|
128 | &mut self, | |
129 | on_disk: &'on_disk [u8], |
|
129 | on_disk: &'on_disk [u8], | |
130 | unreachable_bytes: &mut u32, |
|
130 | unreachable_bytes: &mut u32, | |
131 | ) -> Result< |
|
131 | ) -> Result< | |
132 | &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>, |
|
132 | &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>, | |
133 | DirstateV2ParseError, |
|
133 | DirstateV2ParseError, | |
134 | > { |
|
134 | > { | |
135 | match self { |
|
135 | match self { | |
136 | ChildNodes::InMemory(nodes) => Ok(nodes), |
|
136 | ChildNodes::InMemory(nodes) => Ok(nodes), | |
137 | ChildNodes::OnDisk(nodes) => { |
|
137 | ChildNodes::OnDisk(nodes) => { | |
138 | *unreachable_bytes += |
|
138 | *unreachable_bytes += | |
139 | std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32; |
|
139 | std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32; | |
140 | let nodes = nodes |
|
140 | let nodes = nodes | |
141 | .iter() |
|
141 | .iter() | |
142 | .map(|node| { |
|
142 | .map(|node| { | |
143 | Ok(( |
|
143 | Ok(( | |
144 | node.path(on_disk)?, |
|
144 | node.path(on_disk)?, | |
145 | node.to_in_memory_node(on_disk)?, |
|
145 | node.to_in_memory_node(on_disk)?, | |
146 | )) |
|
146 | )) | |
147 | }) |
|
147 | }) | |
148 | .collect::<Result<_, _>>()?; |
|
148 | .collect::<Result<_, _>>()?; | |
149 | *self = ChildNodes::InMemory(nodes); |
|
149 | *self = ChildNodes::InMemory(nodes); | |
150 | match self { |
|
150 | match self { | |
151 | ChildNodes::InMemory(nodes) => Ok(nodes), |
|
151 | ChildNodes::InMemory(nodes) => Ok(nodes), | |
152 | ChildNodes::OnDisk(_) => unreachable!(), |
|
152 | ChildNodes::OnDisk(_) => unreachable!(), | |
153 | } |
|
153 | } | |
154 | } |
|
154 | } | |
155 | } |
|
155 | } | |
156 | } |
|
156 | } | |
157 | } |
|
157 | } | |
158 |
|
158 | |||
159 | impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> { |
|
159 | impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> { | |
160 | pub(super) fn get( |
|
160 | pub(super) fn get( | |
161 | &self, |
|
161 | &self, | |
162 | base_name: &HgPath, |
|
162 | base_name: &HgPath, | |
163 | on_disk: &'on_disk [u8], |
|
163 | on_disk: &'on_disk [u8], | |
164 | ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> { |
|
164 | ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> { | |
165 | match self { |
|
165 | match self { | |
166 | ChildNodesRef::InMemory(nodes) => Ok(nodes |
|
166 | ChildNodesRef::InMemory(nodes) => Ok(nodes | |
167 | .get_key_value(base_name) |
|
167 | .get_key_value(base_name) | |
168 | .map(|(k, v)| NodeRef::InMemory(k, v))), |
|
168 | .map(|(k, v)| NodeRef::InMemory(k, v))), | |
169 | ChildNodesRef::OnDisk(nodes) => { |
|
169 | ChildNodesRef::OnDisk(nodes) => { | |
170 | let mut parse_result = Ok(()); |
|
170 | let mut parse_result = Ok(()); | |
171 | let search_result = nodes.binary_search_by(|node| { |
|
171 | let search_result = nodes.binary_search_by(|node| { | |
172 | match node.base_name(on_disk) { |
|
172 | match node.base_name(on_disk) { | |
173 | Ok(node_base_name) => node_base_name.cmp(base_name), |
|
173 | Ok(node_base_name) => node_base_name.cmp(base_name), | |
174 | Err(e) => { |
|
174 | Err(e) => { | |
175 | parse_result = Err(e); |
|
175 | parse_result = Err(e); | |
176 | // Dummy comparison result, `search_result` won’t |
|
176 | // Dummy comparison result, `search_result` won’t | |
177 | // be used since `parse_result` is an error |
|
177 | // be used since `parse_result` is an error | |
178 | std::cmp::Ordering::Equal |
|
178 | std::cmp::Ordering::Equal | |
179 | } |
|
179 | } | |
180 | } |
|
180 | } | |
181 | }); |
|
181 | }); | |
182 | parse_result.map(|()| { |
|
182 | parse_result.map(|()| { | |
183 | search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i])) |
|
183 | search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i])) | |
184 | }) |
|
184 | }) | |
185 | } |
|
185 | } | |
186 | } |
|
186 | } | |
187 | } |
|
187 | } | |
188 |
|
188 | |||
189 | /// Iterate in undefined order |
|
189 | /// Iterate in undefined order | |
190 | pub(super) fn iter( |
|
190 | pub(super) fn iter( | |
191 | &self, |
|
191 | &self, | |
192 | ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> { |
|
192 | ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> { | |
193 | match self { |
|
193 | match self { | |
194 | ChildNodesRef::InMemory(nodes) => itertools::Either::Left( |
|
194 | ChildNodesRef::InMemory(nodes) => itertools::Either::Left( | |
195 | nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)), |
|
195 | nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)), | |
196 | ), |
|
196 | ), | |
197 | ChildNodesRef::OnDisk(nodes) => { |
|
197 | ChildNodesRef::OnDisk(nodes) => { | |
198 | itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk)) |
|
198 | itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk)) | |
199 | } |
|
199 | } | |
200 | } |
|
200 | } | |
201 | } |
|
201 | } | |
202 |
|
202 | |||
203 | /// Iterate in parallel in undefined order |
|
203 | /// Iterate in parallel in undefined order | |
204 | pub(super) fn par_iter( |
|
204 | pub(super) fn par_iter( | |
205 | &self, |
|
205 | &self, | |
206 | ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>> |
|
206 | ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>> | |
207 | { |
|
207 | { | |
208 | use rayon::prelude::*; |
|
208 | use rayon::prelude::*; | |
209 | match self { |
|
209 | match self { | |
210 | ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left( |
|
210 | ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left( | |
211 | nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)), |
|
211 | nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)), | |
212 | ), |
|
212 | ), | |
213 | ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right( |
|
213 | ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right( | |
214 | nodes.par_iter().map(NodeRef::OnDisk), |
|
214 | nodes.par_iter().map(NodeRef::OnDisk), | |
215 | ), |
|
215 | ), | |
216 | } |
|
216 | } | |
217 | } |
|
217 | } | |
218 |
|
218 | |||
219 | pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> { |
|
219 | pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> { | |
220 | match self { |
|
220 | match self { | |
221 | ChildNodesRef::InMemory(nodes) => { |
|
221 | ChildNodesRef::InMemory(nodes) => { | |
222 | let mut vec: Vec<_> = nodes |
|
222 | let mut vec: Vec<_> = nodes | |
223 | .iter() |
|
223 | .iter() | |
224 | .map(|(k, v)| NodeRef::InMemory(k, v)) |
|
224 | .map(|(k, v)| NodeRef::InMemory(k, v)) | |
225 | .collect(); |
|
225 | .collect(); | |
226 | fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath { |
|
226 | fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath { | |
227 | match node { |
|
227 | match node { | |
228 | NodeRef::InMemory(path, _node) => path.base_name(), |
|
228 | NodeRef::InMemory(path, _node) => path.base_name(), | |
229 | NodeRef::OnDisk(_) => unreachable!(), |
|
229 | NodeRef::OnDisk(_) => unreachable!(), | |
230 | } |
|
230 | } | |
231 | } |
|
231 | } | |
232 | // `sort_unstable_by_key` doesn’t allow keys borrowing from the |
|
232 | // `sort_unstable_by_key` doesn’t allow keys borrowing from the | |
233 | // value: https://github.com/rust-lang/rust/issues/34162 |
|
233 | // value: https://github.com/rust-lang/rust/issues/34162 | |
234 | vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b))); |
|
234 | vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b))); | |
235 | vec |
|
235 | vec | |
236 | } |
|
236 | } | |
237 | ChildNodesRef::OnDisk(nodes) => { |
|
237 | ChildNodesRef::OnDisk(nodes) => { | |
238 | // Nodes on disk are already sorted |
|
238 | // Nodes on disk are already sorted | |
239 | nodes.iter().map(NodeRef::OnDisk).collect() |
|
239 | nodes.iter().map(NodeRef::OnDisk).collect() | |
240 | } |
|
240 | } | |
241 | } |
|
241 | } | |
242 | } |
|
242 | } | |
243 | } |
|
243 | } | |
244 |
|
244 | |||
245 | impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> { |
|
245 | impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> { | |
246 | pub(super) fn full_path( |
|
246 | pub(super) fn full_path( | |
247 | &self, |
|
247 | &self, | |
248 | on_disk: &'on_disk [u8], |
|
248 | on_disk: &'on_disk [u8], | |
249 | ) -> Result<&'tree HgPath, DirstateV2ParseError> { |
|
249 | ) -> Result<&'tree HgPath, DirstateV2ParseError> { | |
250 | match self { |
|
250 | match self { | |
251 | NodeRef::InMemory(path, _node) => Ok(path.full_path()), |
|
251 | NodeRef::InMemory(path, _node) => Ok(path.full_path()), | |
252 | NodeRef::OnDisk(node) => node.full_path(on_disk), |
|
252 | NodeRef::OnDisk(node) => node.full_path(on_disk), | |
253 | } |
|
253 | } | |
254 | } |
|
254 | } | |
255 |
|
255 | |||
256 | /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk, |
|
256 | /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk, | |
257 | /// HgPath>` detached from `'tree` |
|
257 | /// HgPath>` detached from `'tree` | |
258 | pub(super) fn full_path_borrowed( |
|
258 | pub(super) fn full_path_borrowed( | |
259 | &self, |
|
259 | &self, | |
260 | on_disk: &'on_disk [u8], |
|
260 | on_disk: &'on_disk [u8], | |
261 | ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> { |
|
261 | ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> { | |
262 | match self { |
|
262 | match self { | |
263 | NodeRef::InMemory(path, _node) => match path.full_path() { |
|
263 | NodeRef::InMemory(path, _node) => match path.full_path() { | |
264 | Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)), |
|
264 | Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)), | |
265 | Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)), |
|
265 | Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)), | |
266 | }, |
|
266 | }, | |
267 | NodeRef::OnDisk(node) => { |
|
267 | NodeRef::OnDisk(node) => { | |
268 | Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?)) |
|
268 | Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?)) | |
269 | } |
|
269 | } | |
270 | } |
|
270 | } | |
271 | } |
|
271 | } | |
272 |
|
272 | |||
273 | pub(super) fn base_name( |
|
273 | pub(super) fn base_name( | |
274 | &self, |
|
274 | &self, | |
275 | on_disk: &'on_disk [u8], |
|
275 | on_disk: &'on_disk [u8], | |
276 | ) -> Result<&'tree HgPath, DirstateV2ParseError> { |
|
276 | ) -> Result<&'tree HgPath, DirstateV2ParseError> { | |
277 | match self { |
|
277 | match self { | |
278 | NodeRef::InMemory(path, _node) => Ok(path.base_name()), |
|
278 | NodeRef::InMemory(path, _node) => Ok(path.base_name()), | |
279 | NodeRef::OnDisk(node) => node.base_name(on_disk), |
|
279 | NodeRef::OnDisk(node) => node.base_name(on_disk), | |
280 | } |
|
280 | } | |
281 | } |
|
281 | } | |
282 |
|
282 | |||
283 | pub(super) fn children( |
|
283 | pub(super) fn children( | |
284 | &self, |
|
284 | &self, | |
285 | on_disk: &'on_disk [u8], |
|
285 | on_disk: &'on_disk [u8], | |
286 | ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> { |
|
286 | ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> { | |
287 | match self { |
|
287 | match self { | |
288 | NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()), |
|
288 | NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()), | |
289 | NodeRef::OnDisk(node) => { |
|
289 | NodeRef::OnDisk(node) => { | |
290 | Ok(ChildNodesRef::OnDisk(node.children(on_disk)?)) |
|
290 | Ok(ChildNodesRef::OnDisk(node.children(on_disk)?)) | |
291 | } |
|
291 | } | |
292 | } |
|
292 | } | |
293 | } |
|
293 | } | |
294 |
|
294 | |||
295 | pub(super) fn has_copy_source(&self) -> bool { |
|
295 | pub(super) fn has_copy_source(&self) -> bool { | |
296 | match self { |
|
296 | match self { | |
297 | NodeRef::InMemory(_path, node) => node.copy_source.is_some(), |
|
297 | NodeRef::InMemory(_path, node) => node.copy_source.is_some(), | |
298 | NodeRef::OnDisk(node) => node.has_copy_source(), |
|
298 | NodeRef::OnDisk(node) => node.has_copy_source(), | |
299 | } |
|
299 | } | |
300 | } |
|
300 | } | |
301 |
|
301 | |||
302 | pub(super) fn copy_source( |
|
302 | pub(super) fn copy_source( | |
303 | &self, |
|
303 | &self, | |
304 | on_disk: &'on_disk [u8], |
|
304 | on_disk: &'on_disk [u8], | |
305 | ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> { |
|
305 | ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> { | |
306 | match self { |
|
306 | match self { | |
307 | NodeRef::InMemory(_path, node) => { |
|
307 | NodeRef::InMemory(_path, node) => { | |
308 | Ok(node.copy_source.as_ref().map(|s| &**s)) |
|
308 | Ok(node.copy_source.as_ref().map(|s| &**s)) | |
309 | } |
|
309 | } | |
310 | NodeRef::OnDisk(node) => node.copy_source(on_disk), |
|
310 | NodeRef::OnDisk(node) => node.copy_source(on_disk), | |
311 | } |
|
311 | } | |
312 | } |
|
312 | } | |
313 |
|
313 | |||
314 | pub(super) fn entry( |
|
314 | pub(super) fn entry( | |
315 | &self, |
|
315 | &self, | |
316 | ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> { |
|
316 | ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> { | |
317 | match self { |
|
317 | match self { | |
318 | NodeRef::InMemory(_path, node) => { |
|
318 | NodeRef::InMemory(_path, node) => { | |
319 | Ok(node.data.as_entry().copied()) |
|
319 | Ok(node.data.as_entry().copied()) | |
320 | } |
|
320 | } | |
321 | NodeRef::OnDisk(node) => node.entry(), |
|
321 | NodeRef::OnDisk(node) => node.entry(), | |
322 | } |
|
322 | } | |
323 | } |
|
323 | } | |
324 |
|
324 | |||
325 | pub(super) fn state( |
|
325 | pub(super) fn state( | |
326 | &self, |
|
326 | &self, | |
327 | ) -> Result<Option<EntryState>, DirstateV2ParseError> { |
|
327 | ) -> Result<Option<EntryState>, DirstateV2ParseError> { | |
328 | match self { |
|
328 | match self { | |
329 | NodeRef::InMemory(_path, node) => { |
|
329 | NodeRef::InMemory(_path, node) => { | |
330 | Ok(node.data.as_entry().map(|entry| entry.state())) |
|
330 | Ok(node.data.as_entry().map(|entry| entry.state())) | |
331 | } |
|
331 | } | |
332 | NodeRef::OnDisk(node) => node.state(), |
|
332 | NodeRef::OnDisk(node) => node.state(), | |
333 | } |
|
333 | } | |
334 | } |
|
334 | } | |
335 |
|
335 | |||
336 | pub(super) fn cached_directory_mtime( |
|
336 | pub(super) fn cached_directory_mtime( | |
337 | &self, |
|
337 | &self, | |
338 | ) -> Option<&'tree on_disk::Timestamp> { |
|
338 | ) -> Option<&'tree on_disk::Timestamp> { | |
339 | match self { |
|
339 | match self { | |
340 | NodeRef::InMemory(_path, node) => match &node.data { |
|
340 | NodeRef::InMemory(_path, node) => match &node.data { | |
341 | NodeData::CachedDirectory { mtime } => Some(mtime), |
|
341 | NodeData::CachedDirectory { mtime } => Some(mtime), | |
342 | _ => None, |
|
342 | _ => None, | |
343 | }, |
|
343 | }, | |
344 | NodeRef::OnDisk(node) => node.cached_directory_mtime(), |
|
344 | NodeRef::OnDisk(node) => node.cached_directory_mtime(), | |
345 | } |
|
345 | } | |
346 | } |
|
346 | } | |
347 |
|
347 | |||
348 | pub(super) fn descendants_with_entry_count(&self) -> u32 { |
|
348 | pub(super) fn descendants_with_entry_count(&self) -> u32 { | |
349 | match self { |
|
349 | match self { | |
350 | NodeRef::InMemory(_path, node) => { |
|
350 | NodeRef::InMemory(_path, node) => { | |
351 | node.descendants_with_entry_count |
|
351 | node.descendants_with_entry_count | |
352 | } |
|
352 | } | |
353 | NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(), |
|
353 | NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(), | |
354 | } |
|
354 | } | |
355 | } |
|
355 | } | |
356 |
|
356 | |||
357 | pub(super) fn tracked_descendants_count(&self) -> u32 { |
|
357 | pub(super) fn tracked_descendants_count(&self) -> u32 { | |
358 | match self { |
|
358 | match self { | |
359 | NodeRef::InMemory(_path, node) => node.tracked_descendants_count, |
|
359 | NodeRef::InMemory(_path, node) => node.tracked_descendants_count, | |
360 | NodeRef::OnDisk(node) => node.tracked_descendants_count.get(), |
|
360 | NodeRef::OnDisk(node) => node.tracked_descendants_count.get(), | |
361 | } |
|
361 | } | |
362 | } |
|
362 | } | |
363 | } |
|
363 | } | |
364 |
|
364 | |||
/// Represents a file or a directory
#[derive(Default)]
pub(super) struct Node<'on_disk> {
    // Entry / cached directory mtime / nothing (see `NodeData`).
    pub(super) data: NodeData,

    // Path this file was copied or renamed from, borrowed from the
    // on-disk data when possible.
    pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,

    // Child nodes, keyed by path component.
    pub(super) children: ChildNodes<'on_disk>,

    /// How many (non-inclusive) descendants of this node have an entry.
    pub(super) descendants_with_entry_count: u32,

    /// How many (non-inclusive) descendants of this node have an entry whose
    /// state is "tracked".
    pub(super) tracked_descendants_count: u32,
}
381 |
|
381 | |||
// Per-node payload: either a dirstate entry (this path is a file known to
// the dirstate), a cached directory mtime, or nothing (a directory that
// only exists because of its children).
pub(super) enum NodeData {
    Entry(DirstateEntry),
    CachedDirectory { mtime: on_disk::Timestamp },
    None,
}
387 |
|
387 | |||
388 | impl Default for NodeData { |
|
388 | impl Default for NodeData { | |
389 | fn default() -> Self { |
|
389 | fn default() -> Self { | |
390 | NodeData::None |
|
390 | NodeData::None | |
391 | } |
|
391 | } | |
392 | } |
|
392 | } | |
393 |
|
393 | |||
394 | impl NodeData { |
|
394 | impl NodeData { | |
395 | fn has_entry(&self) -> bool { |
|
395 | fn has_entry(&self) -> bool { | |
396 | match self { |
|
396 | match self { | |
397 | NodeData::Entry(_) => true, |
|
397 | NodeData::Entry(_) => true, | |
398 | _ => false, |
|
398 | _ => false, | |
399 | } |
|
399 | } | |
400 | } |
|
400 | } | |
401 |
|
401 | |||
402 | fn as_entry(&self) -> Option<&DirstateEntry> { |
|
402 | fn as_entry(&self) -> Option<&DirstateEntry> { | |
403 | match self { |
|
403 | match self { | |
404 | NodeData::Entry(entry) => Some(entry), |
|
404 | NodeData::Entry(entry) => Some(entry), | |
405 | _ => None, |
|
405 | _ => None, | |
406 | } |
|
406 | } | |
407 | } |
|
407 | } | |
408 | } |
|
408 | } | |
409 |
|
409 | |||
410 | impl<'on_disk> DirstateMap<'on_disk> { |
|
410 | impl<'on_disk> DirstateMap<'on_disk> { | |
411 | pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self { |
|
411 | pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self { | |
412 | Self { |
|
412 | Self { | |
413 | on_disk, |
|
413 | on_disk, | |
414 | root: ChildNodes::default(), |
|
414 | root: ChildNodes::default(), | |
415 | nodes_with_entry_count: 0, |
|
415 | nodes_with_entry_count: 0, | |
416 | nodes_with_copy_source_count: 0, |
|
416 | nodes_with_copy_source_count: 0, | |
417 | ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN], |
|
417 | ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN], | |
418 | unreachable_bytes: 0, |
|
418 | unreachable_bytes: 0, | |
419 | } |
|
419 | } | |
420 | } |
|
420 | } | |
421 |
|
421 | |||
422 | #[timed] |
|
422 | #[timed] | |
423 | pub fn new_v2( |
|
423 | pub fn new_v2( | |
424 | on_disk: &'on_disk [u8], |
|
424 | on_disk: &'on_disk [u8], | |
425 | data_size: usize, |
|
425 | data_size: usize, | |
426 | metadata: &[u8], |
|
426 | metadata: &[u8], | |
427 | ) -> Result<Self, DirstateError> { |
|
427 | ) -> Result<Self, DirstateError> { | |
428 | if let Some(data) = on_disk.get(..data_size) { |
|
428 | if let Some(data) = on_disk.get(..data_size) { | |
429 | Ok(on_disk::read(data, metadata)?) |
|
429 | Ok(on_disk::read(data, metadata)?) | |
430 | } else { |
|
430 | } else { | |
431 | Err(DirstateV2ParseError.into()) |
|
431 | Err(DirstateV2ParseError.into()) | |
432 | } |
|
432 | } | |
433 | } |
|
433 | } | |
434 |
|
434 | |||
/// Builds a map by parsing dirstate-v1 data, returning it together with
/// the working directory parents recorded in the file header (`None` for
/// an empty file).
#[timed]
pub fn new_v1(
    on_disk: &'on_disk [u8],
) -> Result<(Self, Option<DirstateParents>), DirstateError> {
    let mut map = Self::empty(on_disk);
    if map.on_disk.is_empty() {
        return Ok((map, None));
    }

    let parents = parse_dirstate_entries(
        map.on_disk,
        |path, entry, copy_source| {
            let tracked = entry.state().is_tracked();
            // Materialize the node for `path`, updating the counters of
            // every ancestor along the way.
            let node = Self::get_or_insert_node(
                map.on_disk,
                &mut map.unreachable_bytes,
                &mut map.root,
                path,
                WithBasename::to_cow_borrowed,
                |ancestor| {
                    if tracked {
                        ancestor.tracked_descendants_count += 1
                    }
                    ancestor.descendants_with_entry_count += 1
                },
            )?;
            // A well-formed v1 file mentions each path at most once.
            assert!(
                !node.data.has_entry(),
                "duplicate dirstate entry in read"
            );
            assert!(
                node.copy_source.is_none(),
                "duplicate dirstate entry in read"
            );
            node.data = NodeData::Entry(*entry);
            // Borrow from `on_disk` rather than allocating.
            node.copy_source = copy_source.map(Cow::Borrowed);
            map.nodes_with_entry_count += 1;
            if copy_source.is_some() {
                map.nodes_with_copy_source_count += 1
            }
            Ok(())
        },
    )?;
    let parents = Some(parents.clone());

    Ok((map, parents))
}
482 |
|
482 | |||
483 | /// Assuming dirstate-v2 format, returns whether the next write should |
|
483 | /// Assuming dirstate-v2 format, returns whether the next write should | |
484 | /// append to the existing data file that contains `self.on_disk` (true), |
|
484 | /// append to the existing data file that contains `self.on_disk` (true), | |
485 | /// or create a new data file from scratch (false). |
|
485 | /// or create a new data file from scratch (false). | |
486 | pub(super) fn write_should_append(&self) -> bool { |
|
486 | pub(super) fn write_should_append(&self) -> bool { | |
487 | let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32; |
|
487 | let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32; | |
488 | ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO |
|
488 | ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO | |
489 | } |
|
489 | } | |
490 |
|
490 | |||
/// Looks up the node at `path` without creating anything; `Ok(None)` when
/// no such node exists.
fn get_node<'tree>(
    &'tree self,
    path: &HgPath,
) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
    // Walk down the tree one path component at a time.
    let mut children = self.root.as_ref();
    let mut components = path.components();
    let mut component =
        components.next().expect("expected at least one components");
    loop {
        if let Some(child) = children.get(component, self.on_disk)? {
            if let Some(next_component) = components.next() {
                component = next_component;
                children = child.children(self.on_disk)?;
            } else {
                // Last component reached: this is the requested node.
                return Ok(Some(child));
            }
        } else {
            return Ok(None);
        }
    }
}
512 |
|
512 | |||
/// Returns a mutable reference to the node at `path` if it exists
///
/// This takes `root` instead of `&mut self` so that callers can mutate
/// other fields while the returned borrow is still valid
fn get_node_mut<'tree>(
    on_disk: &'on_disk [u8],
    unreachable_bytes: &mut u32,
    root: &'tree mut ChildNodes<'on_disk>,
    path: &HgPath,
) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
    let mut children = root;
    let mut components = path.components();
    let mut component =
        components.next().expect("expected at least one components");
    loop {
        // `make_mut` converts on-disk children to mutable in-memory
        // nodes, accounting for the data-file bytes that become
        // unreachable as a result.
        if let Some(child) = children
            .make_mut(on_disk, unreachable_bytes)?
            .get_mut(component)
        {
            if let Some(next_component) = components.next() {
                component = next_component;
                children = &mut child.children;
            } else {
                // Last component reached: this is the requested node.
                return Ok(Some(child));
            }
        } else {
            return Ok(None);
        }
    }
}
543 |
|
543 | |||
/// Returns a mutable reference to the node at `path`, creating it (and
/// any missing ancestor) if necessary. Inserted keys are owned copies.
pub(super) fn get_or_insert<'tree, 'path>(
    &'tree mut self,
    path: &HgPath,
) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
    Self::get_or_insert_node(
        self.on_disk,
        &mut self.unreachable_bytes,
        &mut self.root,
        path,
        WithBasename::to_cow_owned,
        // No per-ancestor bookkeeping needed for a plain lookup/insert.
        |_| {},
    )
}
557 |
|
557 | |||
/// Walks the chain of nodes for `path`, creating any missing ancestor,
/// and returns the leaf node.
///
/// `to_cow` decides whether inserted keys borrow from the caller's buffer
/// or are owned; `each_ancestor` runs on every strict ancestor (not the
/// leaf) so callers can maintain their counters.
fn get_or_insert_node<'tree, 'path>(
    on_disk: &'on_disk [u8],
    unreachable_bytes: &mut u32,
    root: &'tree mut ChildNodes<'on_disk>,
    path: &'path HgPath,
    to_cow: impl Fn(
        WithBasename<&'path HgPath>,
    ) -> WithBasename<Cow<'on_disk, HgPath>>,
    mut each_ancestor: impl FnMut(&mut Node),
) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
    let mut child_nodes = root;
    let mut inclusive_ancestor_paths =
        WithBasename::inclusive_ancestors_of(path);
    let mut ancestor_path = inclusive_ancestor_paths
        .next()
        .expect("expected at least one inclusive ancestor");
    loop {
        // TODO: can we avoid allocating an owned key in cases where the
        // map already contains that key, without introducing double
        // lookup?
        let child_node = child_nodes
            .make_mut(on_disk, unreachable_bytes)?
            .entry(to_cow(ancestor_path))
            .or_default();
        if let Some(next) = inclusive_ancestor_paths.next() {
            // `child_node` is a strict ancestor of the final path.
            each_ancestor(child_node);
            ancestor_path = next;
            child_nodes = &mut child_node.children;
        } else {
            // `child_node` is the leaf for `path` itself.
            return Ok(child_node);
        }
    }
}
591 |
|
591 | |||
/// Shared implementation behind `add_file` and `remove_file`: stores
/// `new_entry` at `path` and keeps every ancestor's counters consistent
/// with the old-state → new-state transition.
fn add_or_remove_file(
    &mut self,
    path: &HgPath,
    old_state: Option<EntryState>,
    new_entry: DirstateEntry,
) -> Result<(), DirstateV2ParseError> {
    let had_entry = old_state.is_some();
    let was_tracked = old_state.map_or(false, |s| s.is_tracked());
    // -1, 0 or +1 depending on how "tracked-ness" changes.
    let tracked_count_increment =
        match (was_tracked, new_entry.state().is_tracked()) {
            (false, true) => 1,
            (true, false) => -1,
            _ => 0,
        };

    let node = Self::get_or_insert_node(
        self.on_disk,
        &mut self.unreachable_bytes,
        &mut self.root,
        path,
        WithBasename::to_cow_owned,
        |ancestor| {
            if !had_entry {
                ancestor.descendants_with_entry_count += 1;
            }

            // We can’t use `+= increment` because the counter is unsigned,
            // and we want debug builds to detect accidental underflow
            // through zero
            match tracked_count_increment {
                1 => ancestor.tracked_descendants_count += 1,
                -1 => ancestor.tracked_descendants_count -= 1,
                _ => {}
            }
        },
    )?;
    if !had_entry {
        self.nodes_with_entry_count += 1
    }
    node.data = NodeData::Entry(new_entry);
    Ok(())
}
634 |
|
634 | |||
/// Iterates over every node in the tree, depth first — children are
/// yielded before their parent.
fn iter_nodes<'tree>(
    &'tree self,
) -> impl Iterator<
    Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
> + 'tree {
    // Depth first tree traversal.
    //
    // If we could afford internal iteration and recursion,
    // this would look like:
    //
    // ```
    // fn traverse_children(
    //     children: &ChildNodes,
    //     each: &mut impl FnMut(&Node),
    // ) {
    //     for child in children.values() {
    //         traverse_children(&child.children, each);
    //         each(child);
    //     }
    // }
    // ```
    //
    // However we want an external iterator and therefore can’t use the
    // call stack. Use an explicit stack instead:
    let mut stack = Vec::new();
    let mut iter = self.root.as_ref().iter();
    std::iter::from_fn(move || {
        while let Some(child_node) = iter.next() {
            // Parsing a child's on-disk children can fail; surface the
            // error as an iterator item.
            let children = match child_node.children(self.on_disk) {
                Ok(children) => children,
                Err(error) => return Some(Err(error)),
            };
            // Pseudo-recursion
            let new_iter = children.iter();
            let old_iter = std::mem::replace(&mut iter, new_iter);
            stack.push((child_node, old_iter));
        }
        // Found the end of a `children.iter()` iterator.
        if let Some((child_node, next_iter)) = stack.pop() {
            // "Return" from pseudo-recursion by restoring state from the
            // explicit stack
            iter = next_iter;

            Some(Ok(child_node))
        } else {
            // Reached the bottom of the stack, we’re done
            None
        }
    })
}
685 |
|
685 | |||
/// Marks the entries at `paths` as possibly dirty, so that an mtime equal
/// to the dirstate file's own write time is not later mistaken for
/// "clean". Paths with no node or no entry are silently skipped.
fn clear_known_ambiguous_mtimes(
    &mut self,
    paths: &[impl AsRef<HgPath>],
) -> Result<(), DirstateV2ParseError> {
    for path in paths {
        if let Some(node) = Self::get_node_mut(
            self.on_disk,
            &mut self.unreachable_bytes,
            &mut self.root,
            path.as_ref(),
        )? {
            if let NodeData::Entry(entry) = &mut node.data {
                entry.set_possibly_dirty();
            }
        }
    }
    Ok(())
}
704 |
|
704 | |||
705 | fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) { |
|
705 | fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) { | |
706 | if let Cow::Borrowed(path) = path { |
|
706 | if let Cow::Borrowed(path) = path { | |
707 | *unreachable_bytes += path.len() as u32 |
|
707 | *unreachable_bytes += path.len() as u32 | |
708 | } |
|
708 | } | |
709 | } |
|
709 | } | |
710 | } |
|
710 | } | |
711 |
|
711 | |||
/// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
///
/// The callback is only called for incoming `Ok` values. Errors are passed
/// through as-is. In order to let it use the `?` operator the callback is
/// expected to return a `Result` of `Option`, instead of an `Option` of
/// `Result`.
fn filter_map_results<'a, I, F, A, B, E>(
    iter: I,
    f: F,
) -> impl Iterator<Item = Result<B, E>> + 'a
where
    I: Iterator<Item = Result<A, E>> + 'a,
    F: Fn(A) -> Result<Option<B>, E> + 'a,
{
    iter.filter_map(move |item| {
        let value = match item {
            Ok(value) => value,
            // Incoming errors are forwarded untouched.
            Err(error) => return Some(Err(error)),
        };
        // `Ok(None)` filters the item out, `Ok(Some(_))` keeps it, and an
        // `Err` from the callback is emitted as an error item.
        f(value).transpose()
    })
}
731 |
|
731 | |||
732 | impl OwningDirstateMap { |
|
732 | impl OwningDirstateMap { | |
733 | pub fn clear(&mut self) { |
|
733 | pub fn clear(&mut self) { | |
734 | let map = self.get_map_mut(); |
|
734 | let map = self.get_map_mut(); | |
735 | map.root = Default::default(); |
|
735 | map.root = Default::default(); | |
736 | map.nodes_with_entry_count = 0; |
|
736 | map.nodes_with_entry_count = 0; | |
737 | map.nodes_with_copy_source_count = 0; |
|
737 | map.nodes_with_copy_source_count = 0; | |
738 | } |
|
738 | } | |
739 |
|
739 | |||
740 | pub fn set_entry( |
|
740 | pub fn set_entry( | |
741 | &mut self, |
|
741 | &mut self, | |
742 | filename: &HgPath, |
|
742 | filename: &HgPath, | |
743 | entry: DirstateEntry, |
|
743 | entry: DirstateEntry, | |
744 | ) -> Result<(), DirstateV2ParseError> { |
|
744 | ) -> Result<(), DirstateV2ParseError> { | |
745 | let map = self.get_map_mut(); |
|
745 | let map = self.get_map_mut(); | |
746 | map.get_or_insert(&filename)?.data = NodeData::Entry(entry); |
|
746 | map.get_or_insert(&filename)?.data = NodeData::Entry(entry); | |
747 | Ok(()) |
|
747 | Ok(()) | |
748 | } |
|
748 | } | |
749 |
|
749 | |||
750 | pub fn add_file( |
|
750 | pub fn add_file( | |
751 | &mut self, |
|
751 | &mut self, | |
752 | filename: &HgPath, |
|
752 | filename: &HgPath, | |
753 | entry: DirstateEntry, |
|
753 | entry: DirstateEntry, | |
754 | ) -> Result<(), DirstateError> { |
|
754 | ) -> Result<(), DirstateError> { | |
755 | let old_state = self.get(filename)?.map(|e| e.state()); |
|
755 | let old_state = self.get(filename)?.map(|e| e.state()); | |
756 | let map = self.get_map_mut(); |
|
756 | let map = self.get_map_mut(); | |
757 | Ok(map.add_or_remove_file(filename, old_state, entry)?) |
|
757 | Ok(map.add_or_remove_file(filename, old_state, entry)?) | |
758 | } |
|
758 | } | |
759 |
|
759 | |||
/// Replaces the entry for `filename` with a "removed" entry. During a
/// merge, the previous merged/other-parent state is encoded into the
/// entry's size field so it can be restored later.
pub fn remove_file(
    &mut self,
    filename: &HgPath,
    in_merge: bool,
) -> Result<(), DirstateError> {
    let old_entry_opt = self.get(filename)?;
    let old_state = old_entry_opt.map(|e| e.state());
    // Sentinel size: 0 means "plain removal, nothing to restore".
    let mut size = 0;
    if in_merge {
        // XXX we should not be able to have 'm' state and 'FROM_P2' if not
        // during a merge. So I (marmoute) am not sure we need the
        // conditionnal at all. Adding double checking this with assert
        // would be nice.
        if let Some(old_entry) = old_entry_opt {
            // backup the previous state
            if old_entry.state() == EntryState::Merged {
                size = SIZE_NON_NORMAL;
            } else if old_entry.state() == EntryState::Normal
                && old_entry.size() == SIZE_FROM_OTHER_PARENT
            {
                // other parent
                size = SIZE_FROM_OTHER_PARENT;
            }
        }
    }
    // A plain removal also forgets any copy-source information.
    if size == 0 {
        self.copy_map_remove(filename)?;
    }
    let map = self.get_map_mut();
    let entry = DirstateEntry::new_removed(size);
    Ok(map.add_or_remove_file(filename, old_state, entry)?)
}
792 |
|
792 | |||
793 | pub fn drop_entry_and_copy_source( |
|
793 | pub fn drop_entry_and_copy_source( | |
794 | &mut self, |
|
794 | &mut self, | |
795 | filename: &HgPath, |
|
795 | filename: &HgPath, | |
796 | ) -> Result<(), DirstateError> { |
|
796 | ) -> Result<(), DirstateError> { | |
797 | let was_tracked = self |
|
797 | let was_tracked = self | |
798 | .get(filename)? |
|
798 | .get(filename)? | |
799 | .map_or(false, |e| e.state().is_tracked()); |
|
799 | .map_or(false, |e| e.state().is_tracked()); | |
800 | let map = self.get_map_mut(); |
|
800 | let map = self.get_map_mut(); | |
801 | struct Dropped { |
|
801 | struct Dropped { | |
802 | was_tracked: bool, |
|
802 | was_tracked: bool, | |
803 | had_entry: bool, |
|
803 | had_entry: bool, | |
804 | had_copy_source: bool, |
|
804 | had_copy_source: bool, | |
805 | } |
|
805 | } | |
806 |
|
806 | |||
807 | /// If this returns `Ok(Some((dropped, removed)))`, then |
|
807 | /// If this returns `Ok(Some((dropped, removed)))`, then | |
808 | /// |
|
808 | /// | |
809 | /// * `dropped` is about the leaf node that was at `filename` |
|
809 | /// * `dropped` is about the leaf node that was at `filename` | |
810 | /// * `removed` is whether this particular level of recursion just |
|
810 | /// * `removed` is whether this particular level of recursion just | |
811 | /// removed a node in `nodes`. |
|
811 | /// removed a node in `nodes`. | |
812 | fn recur<'on_disk>( |
|
812 | fn recur<'on_disk>( | |
813 | on_disk: &'on_disk [u8], |
|
813 | on_disk: &'on_disk [u8], | |
814 | unreachable_bytes: &mut u32, |
|
814 | unreachable_bytes: &mut u32, | |
815 | nodes: &mut ChildNodes<'on_disk>, |
|
815 | nodes: &mut ChildNodes<'on_disk>, | |
816 | path: &HgPath, |
|
816 | path: &HgPath, | |
817 | ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> { |
|
817 | ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> { | |
818 | let (first_path_component, rest_of_path) = |
|
818 | let (first_path_component, rest_of_path) = | |
819 | path.split_first_component(); |
|
819 | path.split_first_component(); | |
820 | let nodes = nodes.make_mut(on_disk, unreachable_bytes)?; |
|
820 | let nodes = nodes.make_mut(on_disk, unreachable_bytes)?; | |
821 | let node = if let Some(node) = nodes.get_mut(first_path_component) |
|
821 | let node = if let Some(node) = nodes.get_mut(first_path_component) | |
822 | { |
|
822 | { | |
823 | node |
|
823 | node | |
824 | } else { |
|
824 | } else { | |
825 | return Ok(None); |
|
825 | return Ok(None); | |
826 | }; |
|
826 | }; | |
827 | let dropped; |
|
827 | let dropped; | |
828 | if let Some(rest) = rest_of_path { |
|
828 | if let Some(rest) = rest_of_path { | |
829 | if let Some((d, removed)) = recur( |
|
829 | if let Some((d, removed)) = recur( | |
830 | on_disk, |
|
830 | on_disk, | |
831 | unreachable_bytes, |
|
831 | unreachable_bytes, | |
832 | &mut node.children, |
|
832 | &mut node.children, | |
833 | rest, |
|
833 | rest, | |
834 | )? { |
|
834 | )? { | |
835 | dropped = d; |
|
835 | dropped = d; | |
836 | if dropped.had_entry { |
|
836 | if dropped.had_entry { | |
837 | node.descendants_with_entry_count -= 1; |
|
837 | node.descendants_with_entry_count -= 1; | |
838 | } |
|
838 | } | |
839 | if dropped.was_tracked { |
|
839 | if dropped.was_tracked { | |
840 | node.tracked_descendants_count -= 1; |
|
840 | node.tracked_descendants_count -= 1; | |
841 | } |
|
841 | } | |
842 |
|
842 | |||
843 | // Directory caches must be invalidated when removing a |
|
843 | // Directory caches must be invalidated when removing a | |
844 | // child node |
|
844 | // child node | |
845 | if removed { |
|
845 | if removed { | |
846 | if let NodeData::CachedDirectory { .. } = &node.data { |
|
846 | if let NodeData::CachedDirectory { .. } = &node.data { | |
847 | node.data = NodeData::None |
|
847 | node.data = NodeData::None | |
848 | } |
|
848 | } | |
849 | } |
|
849 | } | |
850 | } else { |
|
850 | } else { | |
851 | return Ok(None); |
|
851 | return Ok(None); | |
852 | } |
|
852 | } | |
853 | } else { |
|
853 | } else { | |
854 | let had_entry = node.data.has_entry(); |
|
854 | let had_entry = node.data.has_entry(); | |
855 | if had_entry { |
|
855 | if had_entry { | |
856 | node.data = NodeData::None |
|
856 | node.data = NodeData::None | |
857 | } |
|
857 | } | |
858 | if let Some(source) = &node.copy_source { |
|
858 | if let Some(source) = &node.copy_source { | |
859 | DirstateMap::count_dropped_path(unreachable_bytes, source); |
|
859 | DirstateMap::count_dropped_path(unreachable_bytes, source); | |
860 | node.copy_source = None |
|
860 | node.copy_source = None | |
861 | } |
|
861 | } | |
862 | dropped = Dropped { |
|
862 | dropped = Dropped { | |
863 | was_tracked: node |
|
863 | was_tracked: node | |
864 | .data |
|
864 | .data | |
865 | .as_entry() |
|
865 | .as_entry() | |
866 | .map_or(false, |entry| entry.state().is_tracked()), |
|
866 | .map_or(false, |entry| entry.state().is_tracked()), | |
867 | had_entry, |
|
867 | had_entry, | |
868 | had_copy_source: node.copy_source.take().is_some(), |
|
868 | had_copy_source: node.copy_source.take().is_some(), | |
869 | }; |
|
869 | }; | |
870 | } |
|
870 | } | |
871 | // After recursion, for both leaf (rest_of_path is None) nodes and |
|
871 | // After recursion, for both leaf (rest_of_path is None) nodes and | |
872 | // parent nodes, remove a node if it just became empty. |
|
872 | // parent nodes, remove a node if it just became empty. | |
873 | let remove = !node.data.has_entry() |
|
873 | let remove = !node.data.has_entry() | |
874 | && node.copy_source.is_none() |
|
874 | && node.copy_source.is_none() | |
875 | && node.children.is_empty(); |
|
875 | && node.children.is_empty(); | |
876 | if remove { |
|
876 | if remove { | |
877 | let (key, _) = |
|
877 | let (key, _) = | |
878 | nodes.remove_entry(first_path_component).unwrap(); |
|
878 | nodes.remove_entry(first_path_component).unwrap(); | |
879 | DirstateMap::count_dropped_path( |
|
879 | DirstateMap::count_dropped_path( | |
880 | unreachable_bytes, |
|
880 | unreachable_bytes, | |
881 | key.full_path(), |
|
881 | key.full_path(), | |
882 | ) |
|
882 | ) | |
883 | } |
|
883 | } | |
884 | Ok(Some((dropped, remove))) |
|
884 | Ok(Some((dropped, remove))) | |
885 | } |
|
885 | } | |
886 |
|
886 | |||
887 | if let Some((dropped, _removed)) = recur( |
|
887 | if let Some((dropped, _removed)) = recur( | |
888 | map.on_disk, |
|
888 | map.on_disk, | |
889 | &mut map.unreachable_bytes, |
|
889 | &mut map.unreachable_bytes, | |
890 | &mut map.root, |
|
890 | &mut map.root, | |
891 | filename, |
|
891 | filename, | |
892 | )? { |
|
892 | )? { | |
893 | if dropped.had_entry { |
|
893 | if dropped.had_entry { | |
894 | map.nodes_with_entry_count -= 1 |
|
894 | map.nodes_with_entry_count -= 1 | |
895 | } |
|
895 | } | |
896 | if dropped.had_copy_source { |
|
896 | if dropped.had_copy_source { | |
897 | map.nodes_with_copy_source_count -= 1 |
|
897 | map.nodes_with_copy_source_count -= 1 | |
898 | } |
|
898 | } | |
899 | } else { |
|
899 | } else { | |
900 | debug_assert!(!was_tracked); |
|
900 | debug_assert!(!was_tracked); | |
901 | } |
|
901 | } | |
902 | Ok(()) |
|
902 | Ok(()) | |
903 | } |
|
903 | } | |
904 |
|
904 | |||
905 | pub fn has_tracked_dir( |
|
905 | pub fn has_tracked_dir( | |
906 | &mut self, |
|
906 | &mut self, | |
907 | directory: &HgPath, |
|
907 | directory: &HgPath, | |
908 | ) -> Result<bool, DirstateError> { |
|
908 | ) -> Result<bool, DirstateError> { | |
909 | let map = self.get_map_mut(); |
|
909 | let map = self.get_map_mut(); | |
910 | if let Some(node) = map.get_node(directory)? { |
|
910 | if let Some(node) = map.get_node(directory)? { | |
911 | // A node without a `DirstateEntry` was created to hold child |
|
911 | // A node without a `DirstateEntry` was created to hold child | |
912 | // nodes, and is therefore a directory. |
|
912 | // nodes, and is therefore a directory. | |
913 | let state = node.state()?; |
|
913 | let state = node.state()?; | |
914 | Ok(state.is_none() && node.tracked_descendants_count() > 0) |
|
914 | Ok(state.is_none() && node.tracked_descendants_count() > 0) | |
915 | } else { |
|
915 | } else { | |
916 | Ok(false) |
|
916 | Ok(false) | |
917 | } |
|
917 | } | |
918 | } |
|
918 | } | |
919 |
|
919 | |||
920 | pub fn has_dir( |
|
920 | pub fn has_dir( | |
921 | &mut self, |
|
921 | &mut self, | |
922 | directory: &HgPath, |
|
922 | directory: &HgPath, | |
923 | ) -> Result<bool, DirstateError> { |
|
923 | ) -> Result<bool, DirstateError> { | |
924 | let map = self.get_map_mut(); |
|
924 | let map = self.get_map_mut(); | |
925 | if let Some(node) = map.get_node(directory)? { |
|
925 | if let Some(node) = map.get_node(directory)? { | |
926 | // A node without a `DirstateEntry` was created to hold child |
|
926 | // A node without a `DirstateEntry` was created to hold child | |
927 | // nodes, and is therefore a directory. |
|
927 | // nodes, and is therefore a directory. | |
928 | let state = node.state()?; |
|
928 | let state = node.state()?; | |
929 | Ok(state.is_none() && node.descendants_with_entry_count() > 0) |
|
929 | Ok(state.is_none() && node.descendants_with_entry_count() > 0) | |
930 | } else { |
|
930 | } else { | |
931 | Ok(false) |
|
931 | Ok(false) | |
932 | } |
|
932 | } | |
933 | } |
|
933 | } | |
934 |
|
934 | |||
935 | #[timed] |
|
935 | #[timed] | |
936 | pub fn pack_v1( |
|
936 | pub fn pack_v1( | |
937 | &mut self, |
|
937 | &mut self, | |
938 | parents: DirstateParents, |
|
938 | parents: DirstateParents, | |
939 | now: Timestamp, |
|
939 | now: Timestamp, | |
940 | ) -> Result<Vec<u8>, DirstateError> { |
|
940 | ) -> Result<Vec<u8>, DirstateError> { | |
941 | let map = self.get_map_mut(); |
|
941 | let map = self.get_map_mut(); | |
942 | let now: i32 = now.0.try_into().expect("time overflow"); |
|
942 | let now: i32 = now.0.try_into().expect("time overflow"); | |
943 | let mut ambiguous_mtimes = Vec::new(); |
|
943 | let mut ambiguous_mtimes = Vec::new(); | |
944 | // Optizimation (to be measured?): pre-compute size to avoid `Vec` |
|
944 | // Optizimation (to be measured?): pre-compute size to avoid `Vec` | |
945 | // reallocations |
|
945 | // reallocations | |
946 | let mut size = parents.as_bytes().len(); |
|
946 | let mut size = parents.as_bytes().len(); | |
947 | for node in map.iter_nodes() { |
|
947 | for node in map.iter_nodes() { | |
948 | let node = node?; |
|
948 | let node = node?; | |
949 | if let Some(entry) = node.entry()? { |
|
949 | if let Some(entry) = node.entry()? { | |
950 | size += packed_entry_size( |
|
950 | size += packed_entry_size( | |
951 | node.full_path(map.on_disk)?, |
|
951 | node.full_path(map.on_disk)?, | |
952 | node.copy_source(map.on_disk)?, |
|
952 | node.copy_source(map.on_disk)?, | |
953 | ); |
|
953 | ); | |
954 | if entry.mtime_is_ambiguous(now) { |
|
954 | if entry.mtime_is_ambiguous(now) { | |
955 | ambiguous_mtimes.push( |
|
955 | ambiguous_mtimes.push( | |
956 | node.full_path_borrowed(map.on_disk)? |
|
956 | node.full_path_borrowed(map.on_disk)? | |
957 | .detach_from_tree(), |
|
957 | .detach_from_tree(), | |
958 | ) |
|
958 | ) | |
959 | } |
|
959 | } | |
960 | } |
|
960 | } | |
961 | } |
|
961 | } | |
962 | map.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?; |
|
962 | map.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?; | |
963 |
|
963 | |||
964 | let mut packed = Vec::with_capacity(size); |
|
964 | let mut packed = Vec::with_capacity(size); | |
965 | packed.extend(parents.as_bytes()); |
|
965 | packed.extend(parents.as_bytes()); | |
966 |
|
966 | |||
967 | for node in map.iter_nodes() { |
|
967 | for node in map.iter_nodes() { | |
968 | let node = node?; |
|
968 | let node = node?; | |
969 | if let Some(entry) = node.entry()? { |
|
969 | if let Some(entry) = node.entry()? { | |
970 | pack_entry( |
|
970 | pack_entry( | |
971 | node.full_path(map.on_disk)?, |
|
971 | node.full_path(map.on_disk)?, | |
972 | &entry, |
|
972 | &entry, | |
973 | node.copy_source(map.on_disk)?, |
|
973 | node.copy_source(map.on_disk)?, | |
974 | &mut packed, |
|
974 | &mut packed, | |
975 | ); |
|
975 | ); | |
976 | } |
|
976 | } | |
977 | } |
|
977 | } | |
978 | Ok(packed) |
|
978 | Ok(packed) | |
979 | } |
|
979 | } | |
980 |
|
980 | |||
981 | /// Returns new data and metadata together with whether that data should be |
|
981 | /// Returns new data and metadata together with whether that data should be | |
982 | /// appended to the existing data file whose content is at |
|
982 | /// appended to the existing data file whose content is at | |
983 | /// `map.on_disk` (true), instead of written to a new data file |
|
983 | /// `map.on_disk` (true), instead of written to a new data file | |
984 | /// (false). |
|
984 | /// (false). | |
985 | #[timed] |
|
985 | #[timed] | |
986 | pub fn pack_v2( |
|
986 | pub fn pack_v2( | |
987 | &mut self, |
|
987 | &mut self, | |
988 | now: Timestamp, |
|
988 | now: Timestamp, | |
989 | can_append: bool, |
|
989 | can_append: bool, | |
990 | ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> { |
|
990 | ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> { | |
991 | let map = self.get_map_mut(); |
|
991 | let map = self.get_map_mut(); | |
992 | // TODO: how do we want to handle this in 2038? |
|
992 | // TODO: how do we want to handle this in 2038? | |
993 | let now: i32 = now.0.try_into().expect("time overflow"); |
|
993 | let now: i32 = now.0.try_into().expect("time overflow"); | |
994 | let mut paths = Vec::new(); |
|
994 | let mut paths = Vec::new(); | |
995 | for node in map.iter_nodes() { |
|
995 | for node in map.iter_nodes() { | |
996 | let node = node?; |
|
996 | let node = node?; | |
997 | if let Some(entry) = node.entry()? { |
|
997 | if let Some(entry) = node.entry()? { | |
998 | if entry.mtime_is_ambiguous(now) { |
|
998 | if entry.mtime_is_ambiguous(now) { | |
999 | paths.push( |
|
999 | paths.push( | |
1000 | node.full_path_borrowed(map.on_disk)? |
|
1000 | node.full_path_borrowed(map.on_disk)? | |
1001 | .detach_from_tree(), |
|
1001 | .detach_from_tree(), | |
1002 | ) |
|
1002 | ) | |
1003 | } |
|
1003 | } | |
1004 | } |
|
1004 | } | |
1005 | } |
|
1005 | } | |
1006 | // Borrow of `self` ends here since we collect cloned paths |
|
1006 | // Borrow of `self` ends here since we collect cloned paths | |
1007 |
|
1007 | |||
1008 | map.clear_known_ambiguous_mtimes(&paths)?; |
|
1008 | map.clear_known_ambiguous_mtimes(&paths)?; | |
1009 |
|
1009 | |||
1010 | on_disk::write(map, can_append) |
|
1010 | on_disk::write(map, can_append) | |
1011 | } |
|
1011 | } | |
1012 |
|
1012 | |||
1013 | pub fn status<'a>( |
|
1013 | pub fn status<'a>( | |
1014 | &'a mut self, |
|
1014 | &'a mut self, | |
1015 | matcher: &'a (dyn Matcher + Sync), |
|
1015 | matcher: &'a (dyn Matcher + Sync), | |
1016 | root_dir: PathBuf, |
|
1016 | root_dir: PathBuf, | |
1017 | ignore_files: Vec<PathBuf>, |
|
1017 | ignore_files: Vec<PathBuf>, | |
1018 | options: StatusOptions, |
|
1018 | options: StatusOptions, | |
1019 | ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError> |
|
1019 | ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError> | |
1020 | { |
|
1020 | { | |
1021 | let map = self.get_map_mut(); |
|
1021 | let map = self.get_map_mut(); | |
1022 | super::status::status(map, matcher, root_dir, ignore_files, options) |
|
1022 | super::status::status(map, matcher, root_dir, ignore_files, options) | |
1023 | } |
|
1023 | } | |
1024 |
|
1024 | |||
1025 | pub fn copy_map_len(&self) -> usize { |
|
1025 | pub fn copy_map_len(&self) -> usize { | |
1026 | let map = self.get_map(); |
|
1026 | let map = self.get_map(); | |
1027 | map.nodes_with_copy_source_count as usize |
|
1027 | map.nodes_with_copy_source_count as usize | |
1028 | } |
|
1028 | } | |
1029 |
|
1029 | |||
1030 | pub fn copy_map_iter(&self) -> CopyMapIter<'_> { |
|
1030 | pub fn copy_map_iter(&self) -> CopyMapIter<'_> { | |
1031 | let map = self.get_map(); |
|
1031 | let map = self.get_map(); | |
1032 | Box::new(filter_map_results(map.iter_nodes(), move |node| { |
|
1032 | Box::new(filter_map_results(map.iter_nodes(), move |node| { | |
1033 | Ok(if let Some(source) = node.copy_source(map.on_disk)? { |
|
1033 | Ok(if let Some(source) = node.copy_source(map.on_disk)? { | |
1034 | Some((node.full_path(map.on_disk)?, source)) |
|
1034 | Some((node.full_path(map.on_disk)?, source)) | |
1035 | } else { |
|
1035 | } else { | |
1036 | None |
|
1036 | None | |
1037 | }) |
|
1037 | }) | |
1038 | })) |
|
1038 | })) | |
1039 | } |
|
1039 | } | |
1040 |
|
1040 | |||
1041 | pub fn copy_map_contains_key( |
|
1041 | pub fn copy_map_contains_key( | |
1042 | &self, |
|
1042 | &self, | |
1043 | key: &HgPath, |
|
1043 | key: &HgPath, | |
1044 | ) -> Result<bool, DirstateV2ParseError> { |
|
1044 | ) -> Result<bool, DirstateV2ParseError> { | |
1045 | let map = self.get_map(); |
|
1045 | let map = self.get_map(); | |
1046 | Ok(if let Some(node) = map.get_node(key)? { |
|
1046 | Ok(if let Some(node) = map.get_node(key)? { | |
1047 | node.has_copy_source() |
|
1047 | node.has_copy_source() | |
1048 | } else { |
|
1048 | } else { | |
1049 | false |
|
1049 | false | |
1050 | }) |
|
1050 | }) | |
1051 | } |
|
1051 | } | |
1052 |
|
1052 | |||
1053 | pub fn copy_map_get( |
|
1053 | pub fn copy_map_get( | |
1054 | &self, |
|
1054 | &self, | |
1055 | key: &HgPath, |
|
1055 | key: &HgPath, | |
1056 | ) -> Result<Option<&HgPath>, DirstateV2ParseError> { |
|
1056 | ) -> Result<Option<&HgPath>, DirstateV2ParseError> { | |
1057 | let map = self.get_map(); |
|
1057 | let map = self.get_map(); | |
1058 | if let Some(node) = map.get_node(key)? { |
|
1058 | if let Some(node) = map.get_node(key)? { | |
1059 | if let Some(source) = node.copy_source(map.on_disk)? { |
|
1059 | if let Some(source) = node.copy_source(map.on_disk)? { | |
1060 | return Ok(Some(source)); |
|
1060 | return Ok(Some(source)); | |
1061 | } |
|
1061 | } | |
1062 | } |
|
1062 | } | |
1063 | Ok(None) |
|
1063 | Ok(None) | |
1064 | } |
|
1064 | } | |
1065 |
|
1065 | |||
1066 | pub fn copy_map_remove( |
|
1066 | pub fn copy_map_remove( | |
1067 | &mut self, |
|
1067 | &mut self, | |
1068 | key: &HgPath, |
|
1068 | key: &HgPath, | |
1069 | ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { |
|
1069 | ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { | |
1070 | let map = self.get_map_mut(); |
|
1070 | let map = self.get_map_mut(); | |
1071 | let count = &mut map.nodes_with_copy_source_count; |
|
1071 | let count = &mut map.nodes_with_copy_source_count; | |
1072 | let unreachable_bytes = &mut map.unreachable_bytes; |
|
1072 | let unreachable_bytes = &mut map.unreachable_bytes; | |
1073 | Ok(DirstateMap::get_node_mut( |
|
1073 | Ok(DirstateMap::get_node_mut( | |
1074 | map.on_disk, |
|
1074 | map.on_disk, | |
1075 | unreachable_bytes, |
|
1075 | unreachable_bytes, | |
1076 | &mut map.root, |
|
1076 | &mut map.root, | |
1077 | key, |
|
1077 | key, | |
1078 | )? |
|
1078 | )? | |
1079 | .and_then(|node| { |
|
1079 | .and_then(|node| { | |
1080 | if let Some(source) = &node.copy_source { |
|
1080 | if let Some(source) = &node.copy_source { | |
1081 | *count -= 1; |
|
1081 | *count -= 1; | |
1082 | DirstateMap::count_dropped_path(unreachable_bytes, source); |
|
1082 | DirstateMap::count_dropped_path(unreachable_bytes, source); | |
1083 | } |
|
1083 | } | |
1084 | node.copy_source.take().map(Cow::into_owned) |
|
1084 | node.copy_source.take().map(Cow::into_owned) | |
1085 | })) |
|
1085 | })) | |
1086 | } |
|
1086 | } | |
1087 |
|
1087 | |||
1088 | pub fn copy_map_insert( |
|
1088 | pub fn copy_map_insert( | |
1089 | &mut self, |
|
1089 | &mut self, | |
1090 | key: HgPathBuf, |
|
1090 | key: HgPathBuf, | |
1091 | value: HgPathBuf, |
|
1091 | value: HgPathBuf, | |
1092 | ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { |
|
1092 | ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { | |
1093 | let map = self.get_map_mut(); |
|
1093 | let map = self.get_map_mut(); | |
1094 | let node = DirstateMap::get_or_insert_node( |
|
1094 | let node = DirstateMap::get_or_insert_node( | |
1095 | map.on_disk, |
|
1095 | map.on_disk, | |
1096 | &mut map.unreachable_bytes, |
|
1096 | &mut map.unreachable_bytes, | |
1097 | &mut map.root, |
|
1097 | &mut map.root, | |
1098 | &key, |
|
1098 | &key, | |
1099 | WithBasename::to_cow_owned, |
|
1099 | WithBasename::to_cow_owned, | |
1100 | |_ancestor| {}, |
|
1100 | |_ancestor| {}, | |
1101 | )?; |
|
1101 | )?; | |
1102 | if node.copy_source.is_none() { |
|
1102 | if node.copy_source.is_none() { | |
1103 | map.nodes_with_copy_source_count += 1 |
|
1103 | map.nodes_with_copy_source_count += 1 | |
1104 | } |
|
1104 | } | |
1105 | Ok(node.copy_source.replace(value.into()).map(Cow::into_owned)) |
|
1105 | Ok(node.copy_source.replace(value.into()).map(Cow::into_owned)) | |
1106 | } |
|
1106 | } | |
1107 |
|
1107 | |||
1108 | pub fn len(&self) -> usize { |
|
1108 | pub fn len(&self) -> usize { | |
1109 | let map = self.get_map(); |
|
1109 | let map = self.get_map(); | |
1110 | map.nodes_with_entry_count as usize |
|
1110 | map.nodes_with_entry_count as usize | |
1111 | } |
|
1111 | } | |
1112 |
|
1112 | |||
1113 | pub fn contains_key( |
|
1113 | pub fn contains_key( | |
1114 | &self, |
|
1114 | &self, | |
1115 | key: &HgPath, |
|
1115 | key: &HgPath, | |
1116 | ) -> Result<bool, DirstateV2ParseError> { |
|
1116 | ) -> Result<bool, DirstateV2ParseError> { | |
1117 | Ok(self.get(key)?.is_some()) |
|
1117 | Ok(self.get(key)?.is_some()) | |
1118 | } |
|
1118 | } | |
1119 |
|
1119 | |||
1120 | pub fn get( |
|
1120 | pub fn get( | |
1121 | &self, |
|
1121 | &self, | |
1122 | key: &HgPath, |
|
1122 | key: &HgPath, | |
1123 | ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> { |
|
1123 | ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> { | |
1124 | let map = self.get_map(); |
|
1124 | let map = self.get_map(); | |
1125 | Ok(if let Some(node) = map.get_node(key)? { |
|
1125 | Ok(if let Some(node) = map.get_node(key)? { | |
1126 | node.entry()? |
|
1126 | node.entry()? | |
1127 | } else { |
|
1127 | } else { | |
1128 | None |
|
1128 | None | |
1129 | }) |
|
1129 | }) | |
1130 | } |
|
1130 | } | |
1131 |
|
1131 | |||
1132 | pub fn iter(&self) -> StateMapIter<'_> { |
|
1132 | pub fn iter(&self) -> StateMapIter<'_> { | |
1133 | let map = self.get_map(); |
|
1133 | let map = self.get_map(); | |
1134 | Box::new(filter_map_results(map.iter_nodes(), move |node| { |
|
1134 | Box::new(filter_map_results(map.iter_nodes(), move |node| { | |
1135 | Ok(if let Some(entry) = node.entry()? { |
|
1135 | Ok(if let Some(entry) = node.entry()? { | |
1136 | Some((node.full_path(map.on_disk)?, entry)) |
|
1136 | Some((node.full_path(map.on_disk)?, entry)) | |
1137 | } else { |
|
1137 | } else { | |
1138 | None |
|
1138 | None | |
1139 | }) |
|
1139 | }) | |
1140 | })) |
|
1140 | })) | |
1141 | } |
|
1141 | } | |
1142 |
|
1142 | |||
1143 | pub fn iter_tracked_dirs( |
|
1143 | pub fn iter_tracked_dirs( | |
1144 | &mut self, |
|
1144 | &mut self, | |
1145 | ) -> Result< |
|
1145 | ) -> Result< | |
1146 | Box< |
|
1146 | Box< | |
1147 | dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> |
|
1147 | dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> | |
1148 | + Send |
|
1148 | + Send | |
1149 | + '_, |
|
1149 | + '_, | |
1150 | >, |
|
1150 | >, | |
1151 | DirstateError, |
|
1151 | DirstateError, | |
1152 | > { |
|
1152 | > { | |
1153 | let map = self.get_map_mut(); |
|
1153 | let map = self.get_map_mut(); | |
1154 | let on_disk = map.on_disk; |
|
1154 | let on_disk = map.on_disk; | |
1155 | Ok(Box::new(filter_map_results( |
|
1155 | Ok(Box::new(filter_map_results( | |
1156 | map.iter_nodes(), |
|
1156 | map.iter_nodes(), | |
1157 | move |node| { |
|
1157 | move |node| { | |
1158 | Ok(if node.tracked_descendants_count() > 0 { |
|
1158 | Ok(if node.tracked_descendants_count() > 0 { | |
1159 | Some(node.full_path(on_disk)?) |
|
1159 | Some(node.full_path(on_disk)?) | |
1160 | } else { |
|
1160 | } else { | |
1161 | None |
|
1161 | None | |
1162 | }) |
|
1162 | }) | |
1163 | }, |
|
1163 | }, | |
1164 | ))) |
|
1164 | ))) | |
1165 | } |
|
1165 | } | |
1166 |
|
1166 | |||
1167 | pub fn debug_iter( |
|
1167 | pub fn debug_iter( | |
1168 | &self, |
|
1168 | &self, | |
1169 | all: bool, |
|
1169 | all: bool, | |
1170 | ) -> Box< |
|
1170 | ) -> Box< | |
1171 | dyn Iterator< |
|
1171 | dyn Iterator< | |
1172 | Item = Result< |
|
1172 | Item = Result< | |
1173 | (&HgPath, (u8, i32, i32, i32)), |
|
1173 | (&HgPath, (u8, i32, i32, i32)), | |
1174 | DirstateV2ParseError, |
|
1174 | DirstateV2ParseError, | |
1175 | >, |
|
1175 | >, | |
1176 | > + Send |
|
1176 | > + Send | |
1177 | + '_, |
|
1177 | + '_, | |
1178 | > { |
|
1178 | > { | |
1179 | let map = self.get_map(); |
|
1179 | let map = self.get_map(); | |
1180 | Box::new(filter_map_results(map.iter_nodes(), move |node| { |
|
1180 | Box::new(filter_map_results(map.iter_nodes(), move |node| { | |
1181 | let debug_tuple = if let Some(entry) = node.entry()? { |
|
1181 | let debug_tuple = if let Some(entry) = node.entry()? { | |
1182 | entry.debug_tuple() |
|
1182 | entry.debug_tuple() | |
1183 | } else if !all { |
|
1183 | } else if !all { | |
1184 | return Ok(None); |
|
1184 | return Ok(None); | |
1185 | } else if let Some(mtime) = node.cached_directory_mtime() { |
|
1185 | } else if let Some(mtime) = node.cached_directory_mtime() { | |
1186 | (b' ', 0, -1, mtime.seconds() as i32) |
|
1186 | (b' ', 0, -1, mtime.seconds() as i32) | |
1187 | } else { |
|
1187 | } else { | |
1188 | (b' ', 0, -1, -1) |
|
1188 | (b' ', 0, -1, -1) | |
1189 | }; |
|
1189 | }; | |
1190 | Ok(Some((node.full_path(map.on_disk)?, debug_tuple))) |
|
1190 | Ok(Some((node.full_path(map.on_disk)?, debug_tuple))) | |
1191 | })) |
|
1191 | })) | |
1192 | } |
|
1192 | } | |
1193 | } |
|
1193 | } |
@@ -1,218 +1,219 | |||||
1 | use cpython::exc; |
|
1 | use cpython::exc; | |
2 | use cpython::PyBytes; |
|
2 | use cpython::PyBytes; | |
3 | use cpython::PyErr; |
|
3 | use cpython::PyErr; | |
4 | use cpython::PyNone; |
|
4 | use cpython::PyNone; | |
5 | use cpython::PyObject; |
|
5 | use cpython::PyObject; | |
6 | use cpython::PyResult; |
|
6 | use cpython::PyResult; | |
7 | use cpython::Python; |
|
7 | use cpython::Python; | |
8 | use cpython::PythonObject; |
|
8 | use cpython::PythonObject; | |
9 | use hg::dirstate::entry::Flags; |
|
|||
10 | use hg::dirstate::DirstateEntry; |
|
9 | use hg::dirstate::DirstateEntry; | |
11 | use hg::dirstate::EntryState; |
|
10 | use hg::dirstate::EntryState; | |
12 | use std::cell::Cell; |
|
11 | use std::cell::Cell; | |
13 | use std::convert::TryFrom; |
|
12 | use std::convert::TryFrom; | |
14 |
|
13 | |||
15 | py_class!(pub class DirstateItem |py| { |
|
14 | py_class!(pub class DirstateItem |py| { | |
16 | data entry: Cell<DirstateEntry>; |
|
15 | data entry: Cell<DirstateEntry>; | |
17 |
|
16 | |||
18 | def __new__( |
|
17 | def __new__( | |
19 | _cls, |
|
18 | _cls, | |
20 | wc_tracked: bool = false, |
|
19 | wc_tracked: bool = false, | |
21 | p1_tracked: bool = false, |
|
20 | p1_tracked: bool = false, | |
22 |
p2_ |
|
21 | p2_info: bool = false, | |
23 |
|
|
22 | has_meaningful_data: bool = true, | |
24 |
|
|
23 | has_meaningful_mtime: bool = true, | |
25 | clean_p2: bool = false, |
|
|||
26 | possibly_dirty: bool = false, |
|
|||
27 | parentfiledata: Option<(i32, i32, i32)> = None, |
|
24 | parentfiledata: Option<(i32, i32, i32)> = None, | |
28 |
|
25 | |||
29 | ) -> PyResult<DirstateItem> { |
|
26 | ) -> PyResult<DirstateItem> { | |
30 | let mut flags = Flags::empty(); |
|
27 | let mut mode_size_opt = None; | |
31 | flags.set(Flags::WDIR_TRACKED, wc_tracked); |
|
28 | let mut mtime_opt = None; | |
32 | flags.set(Flags::P1_TRACKED, p1_tracked); |
|
29 | if let Some((mode, size, mtime)) = parentfiledata { | |
33 | flags.set(Flags::P2_TRACKED, p2_tracked); |
|
30 | if has_meaningful_data { | |
34 | flags.set(Flags::MERGED, merged); |
|
31 | mode_size_opt = Some((mode, size)) | |
35 | flags.set(Flags::CLEAN_P1, clean_p1); |
|
32 | } | |
36 | flags.set(Flags::CLEAN_P2, clean_p2); |
|
33 | if has_meaningful_mtime { | |
37 | flags.set(Flags::POSSIBLY_DIRTY, possibly_dirty); |
|
34 | mtime_opt = Some(mtime) | |
38 | let entry = DirstateEntry::new(flags, parentfiledata); |
|
35 | } | |
|
36 | } | |||
|
37 | let entry = DirstateEntry::new( | |||
|
38 | wc_tracked, p1_tracked, p2_info, mode_size_opt, mtime_opt, | |||
|
39 | ); | |||
39 | DirstateItem::create_instance(py, Cell::new(entry)) |
|
40 | DirstateItem::create_instance(py, Cell::new(entry)) | |
40 | } |
|
41 | } | |
41 |
|
42 | |||
42 | @property |
|
43 | @property | |
43 | def state(&self) -> PyResult<PyBytes> { |
|
44 | def state(&self) -> PyResult<PyBytes> { | |
44 | let state_byte: u8 = self.entry(py).get().state().into(); |
|
45 | let state_byte: u8 = self.entry(py).get().state().into(); | |
45 | Ok(PyBytes::new(py, &[state_byte])) |
|
46 | Ok(PyBytes::new(py, &[state_byte])) | |
46 | } |
|
47 | } | |
47 |
|
48 | |||
48 | @property |
|
49 | @property | |
49 | def mode(&self) -> PyResult<i32> { |
|
50 | def mode(&self) -> PyResult<i32> { | |
50 | Ok(self.entry(py).get().mode()) |
|
51 | Ok(self.entry(py).get().mode()) | |
51 | } |
|
52 | } | |
52 |
|
53 | |||
53 | @property |
|
54 | @property | |
54 | def size(&self) -> PyResult<i32> { |
|
55 | def size(&self) -> PyResult<i32> { | |
55 | Ok(self.entry(py).get().size()) |
|
56 | Ok(self.entry(py).get().size()) | |
56 | } |
|
57 | } | |
57 |
|
58 | |||
58 | @property |
|
59 | @property | |
59 | def mtime(&self) -> PyResult<i32> { |
|
60 | def mtime(&self) -> PyResult<i32> { | |
60 | Ok(self.entry(py).get().mtime()) |
|
61 | Ok(self.entry(py).get().mtime()) | |
61 | } |
|
62 | } | |
62 |
|
63 | |||
63 | @property |
|
64 | @property | |
64 | def tracked(&self) -> PyResult<bool> { |
|
65 | def tracked(&self) -> PyResult<bool> { | |
65 | Ok(self.entry(py).get().tracked()) |
|
66 | Ok(self.entry(py).get().tracked()) | |
66 | } |
|
67 | } | |
67 |
|
68 | |||
68 | @property |
|
69 | @property | |
69 | def added(&self) -> PyResult<bool> { |
|
70 | def added(&self) -> PyResult<bool> { | |
70 | Ok(self.entry(py).get().added()) |
|
71 | Ok(self.entry(py).get().added()) | |
71 | } |
|
72 | } | |
72 |
|
73 | |||
73 | @property |
|
74 | @property | |
74 | def merged(&self) -> PyResult<bool> { |
|
75 | def merged(&self) -> PyResult<bool> { | |
75 | Ok(self.entry(py).get().merged()) |
|
76 | Ok(self.entry(py).get().merged()) | |
76 | } |
|
77 | } | |
77 |
|
78 | |||
78 | @property |
|
79 | @property | |
79 | def removed(&self) -> PyResult<bool> { |
|
80 | def removed(&self) -> PyResult<bool> { | |
80 | Ok(self.entry(py).get().removed()) |
|
81 | Ok(self.entry(py).get().removed()) | |
81 | } |
|
82 | } | |
82 |
|
83 | |||
83 | @property |
|
84 | @property | |
84 | def from_p2(&self) -> PyResult<bool> { |
|
85 | def from_p2(&self) -> PyResult<bool> { | |
85 | Ok(self.entry(py).get().from_p2()) |
|
86 | Ok(self.entry(py).get().from_p2()) | |
86 | } |
|
87 | } | |
87 |
|
88 | |||
88 | @property |
|
89 | @property | |
89 | def maybe_clean(&self) -> PyResult<bool> { |
|
90 | def maybe_clean(&self) -> PyResult<bool> { | |
90 | Ok(self.entry(py).get().maybe_clean()) |
|
91 | Ok(self.entry(py).get().maybe_clean()) | |
91 | } |
|
92 | } | |
92 |
|
93 | |||
93 | @property |
|
94 | @property | |
94 | def any_tracked(&self) -> PyResult<bool> { |
|
95 | def any_tracked(&self) -> PyResult<bool> { | |
95 | Ok(self.entry(py).get().any_tracked()) |
|
96 | Ok(self.entry(py).get().any_tracked()) | |
96 | } |
|
97 | } | |
97 |
|
98 | |||
    /// The dirstate-v1 state byte of this entry, as a one-byte `bytes`
    /// object. Extracted from the `(state, mode, size, mtime)` tuple
    /// returned by `DirstateEntry::v1_data()`.
    def v1_state(&self) -> PyResult<PyBytes> {
        let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
        let state_byte: u8 = state.into();
        Ok(PyBytes::new(py, &[state_byte]))
    }

    /// The dirstate-v1 mode field of this entry.
    def v1_mode(&self) -> PyResult<i32> {
        let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
        Ok(mode)
    }

    /// The dirstate-v1 size field of this entry.
    def v1_size(&self) -> PyResult<i32> {
        let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
        Ok(size)
    }

    /// The dirstate-v1 mtime field of this entry.
    def v1_mtime(&self) -> PyResult<i32> {
        let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
        Ok(mtime)
    }

    /// Whether writing this entry out must be delayed relative to `now`;
    /// delegates to `DirstateEntry::mtime_is_ambiguous(now)`
    /// (presumably true when the entry's mtime equals `now` — confirm in
    /// hg-core).
    def need_delay(&self, now: i32) -> PyResult<bool> {
        Ok(self.entry(py).get().mtime_is_ambiguous(now))
    }
    /// Build a `DirstateItem` from the four dirstate-v1 fields.
    ///
    /// `state` must be exactly one byte and convertible into an
    /// `EntryState`; otherwise a Python `ValueError` ("invalid state")
    /// is raised.
    @classmethod
    def from_v1_data(
        _cls,
        state: PyBytes,
        mode: i32,
        size: i32,
        mtime: i32,
    ) -> PyResult<Self> {
        // Validate length (exactly 1 byte) and value in one chain; any
        // failure collapses to the same ValueError.
        let state = <[u8; 1]>::try_from(state.data(py))
            .ok()
            .and_then(|state| EntryState::try_from(state[0]).ok())
            .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
        let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
        DirstateItem::create_instance(py, Cell::new(entry))
    }
    /// New item wrapping `DirstateEntry::new_added()`.
    @classmethod
    def new_added(_cls) -> PyResult<Self> {
        let entry = DirstateEntry::new_added();
        DirstateItem::create_instance(py, Cell::new(entry))
    }

    /// New item wrapping `DirstateEntry::new_merged()`.
    @classmethod
    def new_merged(_cls) -> PyResult<Self> {
        let entry = DirstateEntry::new_merged();
        DirstateItem::create_instance(py, Cell::new(entry))
    }

    /// New item wrapping `DirstateEntry::new_from_p2()`.
    @classmethod
    def new_from_p2(_cls) -> PyResult<Self> {
        let entry = DirstateEntry::new_from_p2();
        DirstateItem::create_instance(py, Cell::new(entry))
    }

    /// New item wrapping `DirstateEntry::new_possibly_dirty()`.
    @classmethod
    def new_possibly_dirty(_cls) -> PyResult<Self> {
        let entry = DirstateEntry::new_possibly_dirty();
        DirstateItem::create_instance(py, Cell::new(entry))
    }

    /// New item wrapping `DirstateEntry::new_normal(mode, size, mtime)`.
    @classmethod
    def new_normal(_cls, mode: i32, size: i32, mtime: i32) -> PyResult<Self> {
        let entry = DirstateEntry::new_normal(mode, size, mtime);
        DirstateItem::create_instance(py, Cell::new(entry))
    }
    /// In-place update: delegates to `DirstateEntry::drop_merge_data()`
    /// via the copy-mutate-store helper `update` (the entry lives in a
    /// `Cell`, so it is copied out, mutated, and written back).
    def drop_merge_data(&self) -> PyResult<PyNone> {
        self.update(py, |entry| entry.drop_merge_data());
        Ok(PyNone)
    }

    /// In-place update: marks the entry clean with the given
    /// mode/size/mtime via `DirstateEntry::set_clean`.
    def set_clean(
        &self,
        mode: i32,
        size: i32,
        mtime: i32,
    ) -> PyResult<PyNone> {
        self.update(py, |entry| entry.set_clean(mode, size, mtime));
        Ok(PyNone)
    }

    /// In-place update: delegates to
    /// `DirstateEntry::set_possibly_dirty()`.
    def set_possibly_dirty(&self) -> PyResult<PyNone> {
        self.update(py, |entry| entry.set_possibly_dirty());
        Ok(PyNone)
    }

    /// In-place update: delegates to `DirstateEntry::set_tracked()`.
    def set_tracked(&self) -> PyResult<PyNone> {
        self.update(py, |entry| entry.set_tracked());
        Ok(PyNone)
    }

    /// In-place update: delegates to `DirstateEntry::set_untracked()`.
    def set_untracked(&self) -> PyResult<PyNone> {
        self.update(py, |entry| entry.set_untracked());
        Ok(PyNone)
    }
198 | }); |
|
199 | }); | |
199 |
|
200 | |||
200 | impl DirstateItem { |
|
201 | impl DirstateItem { | |
201 | pub fn new_as_pyobject( |
|
202 | pub fn new_as_pyobject( | |
202 | py: Python<'_>, |
|
203 | py: Python<'_>, | |
203 | entry: DirstateEntry, |
|
204 | entry: DirstateEntry, | |
204 | ) -> PyResult<PyObject> { |
|
205 | ) -> PyResult<PyObject> { | |
205 | Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object()) |
|
206 | Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object()) | |
206 | } |
|
207 | } | |
207 |
|
208 | |||
208 | pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry { |
|
209 | pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry { | |
209 | self.entry(py).get() |
|
210 | self.entry(py).get() | |
210 | } |
|
211 | } | |
211 |
|
212 | |||
212 | // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable |
|
213 | // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable | |
213 | pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) { |
|
214 | pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) { | |
214 | let mut entry = self.entry(py).get(); |
|
215 | let mut entry = self.entry(py).get(); | |
215 | f(&mut entry); |
|
216 | f(&mut entry); | |
216 | self.entry(py).set(entry) |
|
217 | self.entry(py).set(entry) | |
217 | } |
|
218 | } | |
218 | } |
|
219 | } |
General Comments 0
You need to be logged in to leave comments.
Login now