Showing 16 changed files with 141 additions and 16 deletions.
@@ -13,7 +13,7 @@ type UpdateMappingRuleCommand struct {
     // 匹配规则ID
     MappingRuleId int `cname:"匹配规则ID" json:"mappingRuleId" valid:"Required"`
     // 名称
-    Name string `cname:"名称" json:"name" valid:"Required"`
+    Name string `cname:"名称" json:"name"`
     // 校验文件列
     MappingFields []*domain.MappingField `cname:"匹配规则列表" json:"mappingFields" valid:"Required"`
 }
@@ -288,11 +288,14 @@ func (mappingRuleService *MappingRuleService) UpdateMappingRule(ctx *domain.Cont
         return nil, application.ThrowError(application.INTERNAL_SERVER_ERROR, err.Error())
     }
 
-    if duplicateRule, e := mappingRuleRepository.FindOne(map[string]interface{}{"context": ctx, "name": cmd.Name}); e == nil && duplicateRule != nil && duplicateRule.MappingRuleId != cmd.MappingRuleId {
-        return nil, application.ThrowError(application.INTERNAL_SERVER_ERROR, "方案名称重复")
+    if len(cmd.Name) != 0 {
+        if duplicateRule, e := mappingRuleRepository.FindOne(map[string]interface{}{"context": ctx, "name": cmd.Name}); e == nil && duplicateRule != nil && duplicateRule.MappingRuleId != cmd.MappingRuleId {
+            return nil, application.ThrowError(application.INTERNAL_SERVER_ERROR, "方案名称重复")
+        }
+    }
+    if len(cmd.Name) > 0 {
+        mappingRule.Name = cmd.Name
     }
-
-    mappingRule.Name = cmd.Name
     mappingRule.MappingFields = cmd.MappingFields
     mappingRule.VerifiedFileFields = fileTable.Fields(false)
     mappingRule.UpdatedAt = time.Now()
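Together with the command change above, the rule name becomes optional on update: the duplicate-name check and the assignment both run only when a non-empty `Name` is supplied, so omitting the field leaves the stored name untouched. A minimal, self-contained sketch of this partial-update pattern (the `updateCmd` and `mappingRule` types below are simplified stand-ins, not the real command/domain types):

```go
package main

import "fmt"

// updateCmd stands in for the real update command: an empty Name means
// "leave the stored name unchanged".
type updateCmd struct {
	Name string
}

// mappingRule stands in for the persisted aggregate.
type mappingRule struct {
	Name string
}

// applyName overwrites the stored name only when the command carries one.
func applyName(rule *mappingRule, cmd updateCmd) {
	if len(cmd.Name) > 0 {
		rule.Name = cmd.Name
	}
}

func main() {
	rule := &mappingRule{Name: "原方案"}
	applyName(rule, updateCmd{})             // empty name: keep 原方案
	applyName(rule, updateCmd{Name: "新方案"}) // non-empty: rename
	fmt.Println(rule.Name)                   // 新方案
}
```

Note that `len(cmd.Name) != 0` and `len(cmd.Name) > 0` are equivalent, so the duplicate check and the assignment in the hunk above could share a single guard.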
@@ -52,12 +52,18 @@ type Condition struct {
 }
 
 func (t *DataTable) OptionalValue() []string {
+    //set := make(map[string]string)
     var values = make([]string, 0)
     if len(t.Data) > 0 && len(t.Data[0]) == 1 {
         for i := range t.Data {
             if len(t.Data[i]) == 0 {
                 continue
             }
+            //if _, ok := set[t.Data[i][0]]; ok {
+            //    continue
+            //} else {
+            //    set[t.Data[i][0]] = ""
+            //}
             values = append(values, t.Data[i][0])
         }
     }
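The commented-out `set` in `OptionalValue` sketches order-preserving de-duplication but stays disabled, so repeated values are still returned. If it were enabled, the loop would behave like this self-contained sketch (reduced to a plain string slice; `map[string]struct{}` is used instead of `map[string]string` to avoid storing throwaway values):

```go
package main

import "fmt"

// dedupe keeps the first occurrence of each value and preserves input order,
// mirroring what the commented-out set logic would do if it were enabled.
func dedupe(in []string) []string {
	seen := make(map[string]struct{}, len(in))
	out := make([]string, 0, len(in))
	for _, v := range in {
		if _, ok := seen[v]; ok {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	return out
}

func main() {
	fmt.Println(dedupe([]string{"A", "B", "A", "C", "B"})) // [A B C]
}
```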
@@ -86,6 +86,7 @@ var (
 var SQLTypeMap = map[string]string{
     String.ToString():   "文本",
     Int.ToString():      "整数",
+    BigInt.ToString():   "整数",
     Float.ToString():    "小数",
     Date.ToString():     "日期",
     Datetime.ToString(): "日期时间",
@@ -180,8 +181,8 @@ var DBTables = map[int]*Table{
     {
         Index:   5,
         Name:    "操作时间",
-        SQLName: "created_at",
-        SQLType: Datetime.ToString(),
+        SQLName: "log_time", //"created_at",
+        SQLType: String.ToString(),
         Flag:    MainTableField,
     },
     {
@@ -73,6 +73,7 @@ func (file *File) CopyTo(fileType FileType, ctx *Context) *File {
         Url:      file.FileInfo.Url,
         Ext:      file.FileInfo.Ext,
         RowCount: file.FileInfo.RowCount,
+        TableId:  file.FileInfo.TableId,
     },
     CreatedAt: time.Now(),
     UpdatedAt: time.Now(),
@@ -26,6 +26,8 @@ type Log struct {
     CreatedAt time.Time `json:"createdAt"`
     // 扩展
     Context *Context `json:"context"`
+    // 日志时间
+    LogTime string `json:"log_time"`
 }
 
 type LogRepository interface {
@@ -1,5 +1,7 @@
 package domain
 
+import "time"
+
 // LogEntry 日志内容
 type LogEntry struct {
     // 对象名称 数据表名 / 文件名
@@ -14,6 +16,8 @@ type LogEntry struct {
     OperatorName string `json:"operatorName"`
     // 错误级别
     Level string `json:"level"`
+    // 日志时间
+    LogTime string `json:"logTime"`
     // 错误信息
     Error string `json:"error"`
     ctx *Context `json:"-"`
@@ -33,6 +37,7 @@ func NewLogEntry(fileOrTableName string, objectType string, operationType Operat
     ObjectType:    objectType,
     OperationType: operationType.ToString(),
     OperatorName:  ctx.OperatorName,
+    LogTime:       time.Now().Local().Format("2006-01-02 15:04:05"),
     ctx:           ctx,
 }
 }
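The new `LogTime` stamp relies on Go's reference-time layouts: a format string is written in terms of the fixed reference instant `Mon Jan 2 15:04:05 MST 2006`, so `"2006-01-02 15:04:05"` renders as `YYYY-MM-DD HH:MM:SS`. A quick self-contained illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Layouts are written against the reference time Mon Jan 2 15:04:05 MST 2006,
	// so this one produces "YYYY-MM-DD HH:MM:SS" in the local time zone.
	fmt.Println(time.Now().Local().Format("2006-01-02 15:04:05"))

	// The same layout string round-trips through Parse.
	t, err := time.Parse("2006-01-02 15:04:05", "2023-04-01 08:30:00")
	fmt.Println(t, err) // 2023-04-01 08:30:00 +0000 UTC <nil>
}
```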
@@ -226,13 +226,18 @@ type (
 )
 
 func NewTableAppendRequest(param domain.ReqAppendData) TableAppendRequest {
-    return TableAppendRequest{
+    req := TableAppendRequest{
         OriginalTableId:      intToString(param.FileId),
         CheckoutTableFileUrl: param.FileUrl,
         DatabaseTableName:    param.Table.SQLName,
         ColumnSchemas:        DomainFieldsToColumnSchemas(param.Table.DataFields),
         FieldSchemas:         ToFieldSchemas(param.Table.DataFields),
     }
+    if len(param.From) > 0 {
+        req.ColumnSchemas = DomainFieldsToColumnSchemas(param.From)
+        req.FieldSchemas = ToFieldSchemas(param.To)
+    }
+    return req
 }
 
 func intToString(i int) string {
@@ -60,6 +60,8 @@ func (ptr *EditDataTableService) Edit(ctx *domain.Context, req domain.EditTableR
     if response != nil && len(response.Fields) > 0 {
         // 特殊处理修改类型错误
         options := make([]redis.FileCacheOptionsFunc, 0)
+        cacheService := redis.NewFileCacheService()
+
         if req.Action == "convert-column-type" {
             var toType = req.Params["convertType"].(string)
             var fieldName = req.ProcessFieldNames[0]
@@ -68,9 +70,15 @@ func (ptr *EditDataTableService) Edit(ctx *domain.Context, req domain.EditTableR
             } else {
                 options = append(options, redis.WithRemoveConvertTypeErrors([]redis.ConvertTypeError{{FieldName: fieldName, ErrMsg: errMsg, ToType: toType}}))
             }
+            // 底层未返回更改类型以后的字段列表,手动修改缓存
+            if file, err := cacheService.UpdateField(redis.KeyTemporaryFileInfo(file.FileId), file, options...); err != nil {
+                return nil, err
+            } else {
+                response.Fields = file.Fields
+            }
+            return response, nil
         }
 
-        cacheService := redis.NewFileCacheService()
         if _, err := cacheService.Update(redis.KeyTemporaryFileInfo(file.FileId), file, response.Fields, response.Total, options...); err != nil {
             return nil, err
         }
@@ -31,6 +31,12 @@ func (ptr *PreviewDataTableService) Preview(ctx *domain.Context, fileId int, fie
     if tempFile, _ := fileCache.Get(redis.KeyTemporaryFileInfo(fileId)); tempFile != nil {
         isSourceFile = false
         fields = tempFile.Fields
+    } else if file.FileInfo.TableId > 0 {
+        tableRepository, _ := repository.NewTableRepository(ptr.transactionContext)
+        table, _ := tableRepository.FindOne(map[string]interface{}{"tableId": file.FileInfo.TableId})
+        if table != nil {
+            fields = table.Fields(false)
+        }
     }
 
     // Load Data From Excel(python api)
@@ -52,8 +58,11 @@ func (ptr *PreviewDataTableService) Preview(ctx *domain.Context, fileId int, fie
     if err != nil {
         return nil, err
     }
+    if len(fields) == 0 {
+        fields = response.Fields
+    }
     cache := redis.NewFileCacheService()
-    tempFile, err := cache.Update(redis.KeyTemporaryFileInfo(file.FileId), file, response.Fields, response.Total)
+    tempFile, err := cache.Update(redis.KeyTemporaryFileInfo(file.FileId), file, fields, response.Total)
     if err != nil {
         return nil, err
     }
@@ -92,7 +101,15 @@ func (d *FilePreviewDto) Load(fileId int, m *domain.DataLoadDataTable, file *red
     d.ObjectId = fileId
     d.ObjectType = domain.ObjectFile
     d.TableType = domain.ExcelTable.ToString()
-    d.Fields = m.Fields
+    for i, f := range file.Fields {
+        d.Fields = append(d.Fields, &domain.Field{
+            Index:   i + 1,
+            Name:    f.Name,
+            SQLName: f.Name,
+            SQLType: f.SQLType,
+        })
+    }
+
     var fields []*domain.Field
     mapData := domain.ToFieldData(m.Fields, formatData(m.Data, func(s string) string {
         if s == "<NA>" {
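With this hunk the preview DTO's column list is rebuilt from the cached file info (`file.Fields`) instead of being copied from the load response, re-indexed from 1 and with `SQLName` falling back to the display name. A self-contained sketch of that rebuild, using simplified stand-in types rather than the real `redis`/`domain` ones:

```go
package main

import "fmt"

// cachedField stands in for the field entries stored in the Redis file cache.
type cachedField struct {
	Name    string
	SQLType string
}

// dtoField stands in for domain.Field as used by the preview DTO.
type dtoField struct {
	Index   int
	Name    string
	SQLName string
	SQLType string
}

// fieldsFromCache mirrors the new loop in FilePreviewDto.Load: columns come
// from the cache, re-indexed from 1, with SQLName defaulting to the name.
func fieldsFromCache(cached []cachedField) []*dtoField {
	out := make([]*dtoField, 0, len(cached))
	for i, f := range cached {
		out = append(out, &dtoField{Index: i + 1, Name: f.Name, SQLName: f.Name, SQLType: f.SQLType})
	}
	return out
}

func main() {
	for _, f := range fieldsFromCache([]cachedField{{"名称", "string"}, {"数量", "int"}}) {
		fmt.Println(f.Index, f.Name, f.SQLName, f.SQLType)
	}
}
```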
@@ -103,7 +120,7 @@ func (d *FilePreviewDto) Load(fileId int, m *domain.DataLoadDataTable, file *red
     d.Data = domain.GripData(mapData, int64(m.Total))
     d.PageNumber = m.PageNumber
 
-    for _, f := range m.Fields {
+    for _, f := range file.Fields {
         copyField := f.Copy()
         for _, e := range file.ConvertTypeErrors {
             if e.FieldName == copyField.Name {
@@ -43,6 +43,7 @@ func (ptr *PGLogService) Log(logType domain.LogType, sourceId int, logEntry Log)
     OperatorName: entry.OperatorName,
     CreatedAt:    time.Now(),
     Context:      logEntry.Context(),
+    LogTime:      entry.LogTime,
 }
 
 if v, ok := logEntry.Context().GetValue(domain.ContextWithLogLevel); ok {
@@ -59,7 +59,25 @@ func (ptr *AppendDataToTableService) AppendData(ctx *domain.Context, fileId int,
     }
 
     // 通知底层进行追加数据
-    if _, err = ByteCore.AppendData(domain.ReqAppendData{Table: table, FileId: fileId, FileUrl: file.FileInfo.Url}); err != nil {
+    requestData := domain.ReqAppendData{Table: table, FileId: fileId, FileUrl: file.FileInfo.Url}
+    if len(mappingFields) > 0 {
+        for _, m := range mappingFields {
+            if len(m.VerifiedFileFieldName) == 0 {
+                continue
+            }
+            fromField, ok := excelTable.MatchField(&domain.Field{Name: m.VerifiedFileFieldName})
+            if !ok {
+                continue
+            }
+            toField, ok := table.MatchField(m.MainTableField)
+            if !ok {
+                continue
+            }
+            requestData.To = append(requestData.To, toField)
+            requestData.From = append(requestData.From, fromField)
+        }
+    }
+    if _, err = ByteCore.AppendData(requestData); err != nil {
         return nil, err
     }
     return map[string]interface{}{
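The append path now builds per-column `From`/`To` pairs by matching each mapping rule's verified file field against the uploaded Excel table and its main-table field against the target table; rows without a match are skipped. The exact behaviour of `MatchField` is not shown in this diff, so the sketch below only assumes it looks a column up by name and reports whether it exists (simplified stand-in types, not the real `domain` package):

```go
package main

import "fmt"

// Field and Table are pared-down stand-ins for domain.Field and domain.Table.
type Field struct {
	Name    string
	SQLName string
	SQLType string
}

type Table struct {
	DataFields []*Field
}

// MatchField is an assumption about the real method: return the column with
// the same name, plus a flag saying whether it was found.
func (t *Table) MatchField(f *Field) (*Field, bool) {
	for _, df := range t.DataFields {
		if df.Name == f.Name {
			return df, true
		}
	}
	return nil, false
}

func main() {
	excel := &Table{DataFields: []*Field{{Name: "商品名称", SQLName: "product_name", SQLType: "string"}}}
	if from, ok := excel.MatchField(&Field{Name: "商品名称"}); ok {
		fmt.Println(from.SQLName, from.SQLType) // product_name string
	}
	_, ok := excel.MatchField(&Field{Name: "不存在的列"})
	fmt.Println(ok) // false
}
```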
@@ -113,7 +113,54 @@ func (s *FileCacheService) Update(key string, file *domain.File, fields []*domai
         response.AddConvertTypeError(options.AddConvertTypeErrors[i])
     }
     for i := range options.RemoveConvertTypeErrors {
+        convertType := options.RemoveConvertTypeErrors[i]
         response.RemoveConvertTypeError(options.RemoveConvertTypeErrors[i])
+        for j := range response.Fields {
+            if response.Fields[j].Name == convertType.FieldName {
+                response.Fields[j].SQLType = convertType.ToType
+                break
+            }
+        }
+    }
+
+    err = ZeroCoreRedis.Setex(key, json.MarshalToString(response), TemporaryFileExpire)
+    if err != nil {
+        return nil, err
+    }
+    return response, err
+}
+
+func (s *FileCacheService) UpdateField(key string, file *domain.File, errors ...FileCacheOptionsFunc) (*TemporaryFileInfo, error) {
+    ok, err := ZeroCoreRedis.Exists(key)
+    var response = &TemporaryFileInfo{}
+    if err != nil {
+        return response, err
+    }
+    if !ok {
+        return nil, fmt.Errorf("文件不存在")
+    }
+    data, err := ZeroCoreRedis.Get(key)
+    if err != nil {
+        return nil, err
+    }
+    err = json.UnmarshalFromString(data, response)
+    if err != nil {
+        return nil, err
+    }
+
+    options := NewFileCacheOptions(errors...)
+    for i := range options.AddConvertTypeErrors {
+        response.AddConvertTypeError(options.AddConvertTypeErrors[i])
+    }
+    for i := range options.RemoveConvertTypeErrors {
+        convertType := options.RemoveConvertTypeErrors[i]
+        response.RemoveConvertTypeError(options.RemoveConvertTypeErrors[i])
+        for j := range response.Fields {
+            if response.Fields[j].Name == convertType.FieldName {
+                response.Fields[j].SQLType = convertType.ToType
+                break
+            }
+        }
     }
 
     err = ZeroCoreRedis.Setex(key, json.MarshalToString(response), TemporaryFileExpire)
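`Update` and the new `UpdateField` now share the same inner logic: once a conversion error is cleared, the matching cached field is rewritten to the target SQL type. That loop is duplicated verbatim in both methods, so it is a natural candidate for a small helper; a self-contained sketch under simplified stand-in types (not the real cache structs):

```go
package main

import "fmt"

// ConvertTypeError and Field are pared-down stand-ins for the cache types.
type ConvertTypeError struct {
	FieldName string
	ToType    string
}

type Field struct {
	Name    string
	SQLType string
}

// applyConvertedType mirrors the loop duplicated in Update and UpdateField:
// the field named in the cleared error takes the requested target type.
func applyConvertedType(fields []*Field, e ConvertTypeError) {
	for _, f := range fields {
		if f.Name == e.FieldName {
			f.SQLType = e.ToType
			break
		}
	}
}

func main() {
	fields := []*Field{{Name: "价格", SQLType: "string"}, {Name: "数量", SQLType: "int"}}
	applyConvertedType(fields, ConvertTypeError{FieldName: "价格", ToType: "float"})
	fmt.Println(fields[0].SQLType, fields[1].SQLType) // float int
}
```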
@@ -31,6 +31,7 @@ func (repository *LogRepository) Save(log *domain.Log) (*domain.Log, error) {
         "created_at",
         "entry",
         "context",
+        "log_time",
     }
     insertFieldsSnippet := sqlbuilder.SqlFieldsSnippet(sqlbuilder.RemoveSqlFields(sqlBuildFields, "log_id"))
     insertPlaceHoldersSnippet := sqlbuilder.SqlPlaceHoldersSnippet(sqlbuilder.RemoveSqlFields(sqlBuildFields, "log_id"))
@@ -52,6 +53,7 @@ func (repository *LogRepository) Save(log *domain.Log) (*domain.Log, error) {
         &log.CreatedAt,
         &log.Entry,
         &log.Context,
+        &log.LogTime,
     ),
     fmt.Sprintf("INSERT INTO metadata.logs (%s) VALUES (%s) RETURNING %s", insertFieldsSnippet, insertPlaceHoldersSnippet, returningFieldsSnippet),
     log.LogType,
@@ -64,6 +66,7 @@ func (repository *LogRepository) Save(log *domain.Log) (*domain.Log, error) {
     log.CreatedAt,
     log.Entry,
     log.Context,
+    log.LogTime,
 ); err != nil {
     return log, err
 }
@@ -81,6 +84,7 @@ func (repository *LogRepository) Save(log *domain.Log) (*domain.Log, error) {
         &log.CreatedAt,
         &log.Entry,
         &log.Context,
+        &log.LogTime,
     ),
     fmt.Sprintf("UPDATE metadata.logs SET %s WHERE log_id=? RETURNING %s", updateFieldsSnippet, returningFieldsSnippet),
     log.LogType,
@@ -93,6 +97,7 @@ func (repository *LogRepository) Save(log *domain.Log) (*domain.Log, error) {
     log.CreatedAt,
     log.Entry,
     log.Context,
+    log.LogTime,
     log.Identify(),
 ); err != nil {
     return log, err
@@ -142,7 +142,7 @@ var opMap = map[string]string{
 
 func (c Condition) formatByOp(op string, val interface{}) string {
     if op == "like" || op == "not like" {
-        return fmt.Sprintf("'%%%v%%'", AssertString(val))
+        return fmt.Sprintf("'%%%s%%'", AssertString(val))
     }
     return c.Arg(val)
 }
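The `%v` → `%s` switch in `formatByOp` is an intent fix rather than a behaviour change: `AssertString` already yields a string, and for a string argument both verbs print the same thing, with `%%` escaping to a literal percent sign so the value ends up wrapped in SQL `LIKE` wildcards. A two-line check:

```go
package main

import "fmt"

func main() {
	// "%%" escapes to a literal % in the output, giving SQL LIKE wildcards;
	// for a plain string argument %s and %v produce identical results.
	fmt.Println(fmt.Sprintf("'%%%s%%'", "abc")) // '%abc%'
	fmt.Println(fmt.Sprintf("'%%%v%%'", "abc")) // '%abc%'
}
```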
@@ -155,7 +155,12 @@ func (c Condition) InArgs(args interface{}) string {
 
 func (c Condition) Arg(args interface{}) string {
     bytes := make([]byte, 0)
-    bytes = appendValue(bytes, reflect.ValueOf(args))
+    v := reflect.ValueOf(args)
+    if v.Kind() == reflect.Int || v.Kind() == reflect.Int64 || v.Kind() == reflect.Float64 {
+        bytes = append(bytes, []byte(AssertString(args))...)
+        return string(bytes)
+    }
+    bytes = appendValue(bytes, v)
     return string(bytes)
 }
 
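`Arg` now emits `int`, `int64`, and `float64` arguments directly (unquoted) and only falls back to `appendValue` for everything else; note that other numeric kinds such as `int32`, `uint`, or `float32` still take the fallback path. A self-contained sketch of the kind check:

```go
package main

import (
	"fmt"
	"reflect"
)

// isPlainNumber mirrors the kinds the updated Arg short-circuits; other
// numeric kinds (int32, uint, float32, ...) still fall through to appendValue.
func isPlainNumber(v interface{}) bool {
	k := reflect.ValueOf(v).Kind()
	return k == reflect.Int || k == reflect.Int64 || k == reflect.Float64
}

func main() {
	fmt.Println(isPlainNumber(42))       // true  (int)
	fmt.Println(isPlainNumber(int64(7))) // true
	fmt.Println(isPlainNumber(3.14))     // true  (float64)
	fmt.Println(isPlainNumber(int32(7))) // false — falls back to appendValue
	fmt.Println(isPlainNumber("42"))     // false — string path
}
```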
@@ -171,7 +176,6 @@ func appendIn(b []byte, slice reflect.Value) []byte {
     if elem.Kind() == reflect.Interface {
         elem = elem.Elem()
     }
-
     if elem.Kind() == reflect.Slice {
         //b = appendIn(b, elem)
     } else {